"""Sum of the digits of the number num! (default 100)."""


def factorial(num: int) -> int:
    """Return num!, computed iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the number into digits and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
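# Quick sanity check (an editor-added illustration, not part of the original
# script): 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so:
#
#     >>> solution(10)
#     27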
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the indent in the given line."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of the given `indent_level`, optionally bounded by `start_prompt` and `end_prompt`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the file).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after `end_prompt` if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a key function (as used in a sort) so that it ignores underscores and case."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of objects following the rules of isort (all uppercased first, camel-cased second, lower-cased last)."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Sort the imports in a single import statement."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of a given init, and optionally only check it is properly sorted."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Sort the `_import_structure` of all inits in the repo, or check they are sorted."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
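# Illustrative example (editor-added sketch, assuming this file is importable):
# `sort_objects` puts constants first, then classes, then functions, with each
# group sorted while ignoring case and underscores.
#
#     >>> sort_objects(["load_dataset", "Trainer", "LOGGER", "AutoModel"])
#     ['LOGGER', 'AutoModel', 'Trainer', 'load_dataset']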
import inspect
import math
import tempfile
import unittest

import numpy as np

from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMAEForPreTraining, ViTMAEModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
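# Worked example of the masking arithmetic used by the tester above (editor-added
# illustration): with the defaults image_size=30 and patch_size=2,
# num_patches = (30 // 2) ** 2 = 225, and with mask_ratio=0.6 the encoder keeps
# ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 tokens, so seq_length == 91.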
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)


class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir

        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        # Define this so that we don't reference self in create_cache_and_write_probe, which will result in a
        # pickling error due to pickling the SparkContext.
        cache_dir = self._cache_dir

        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(cache_dir, exist_ok=True)
            probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100

        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )

        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, Tuple[int, int, int, List[int]]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()

            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
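# Worked example of the shard-renaming scheme above (editor-added illustration):
# shards are first written as `{name}-{split}-TTTTT-SSSSS-of-NNNNN.arrow`, where
# TTTTT is the Spark task_id and SSSSS the per-task shard_id. With task_id=3,
# shard_id=0, global_shard_id=7 and total_shards=12, `_rename_shard` maps
#
#     "...-00003-00000-of-NNNNN.arrow"  ->  "...-00007-of-00012.arrow"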
"""Evaluate a postfix-notation (reverse Polish) expression."""
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """
    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    >>> evaluate_postfix([])
    0
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Division should truncate toward zero, unlike Python's floor `//`.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
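# Worked example of the division branch above (editor-added illustration):
# Python's `//` floors, so -7 // 2 == -4, but this calculator truncates toward
# zero. Since (-7) * 2 < 0 and -7 % 2 != 0, it returns -4 + 1 == -3:
#
#     >>> evaluate_postfix(["-7", "2", "/"])
#     -3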
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
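# Minimal usage sketch (editor-added; `MobileNetV2Config` is exported from
# `transformers`):
#
#     >>> from transformers import MobileNetV2Config
#     >>> config = MobileNetV2Config(depth_multiplier=1.4)
#     >>> config.hidden_act
#     'relu6'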
"""Find the index of the first Fibonacci number with n digits."""
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield successive Fibonacci numbers starting from 1."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
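# Worked example (editor-added): the first Fibonacci number with three digits is
# F(12) = 144, so solution(3) returns 12.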
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """Test `FeaturesManager.determine_framework`."""

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
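# Usage sketch (editor-added, hedged): `determine_framework` takes a model name
# or local checkpoint path plus an optional framework string and returns "pt" or
# "tf" depending on what is available, e.g.
#
#     >>> from transformers.onnx import FeaturesManager
#     >>> FeaturesManager.determine_framework("bert-base-cased")  # doctest: +SKIP
#     'pt'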
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class UpperCamelCase__ ( _a ):
"""simple docstring"""
__magic_name__ = """xlnet"""
__magic_name__ = ["""mems"""]
__magic_name__ = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=3_2000 , d_model=1024 , n_layer=24 , n_head=16 , d_inner=4096 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1E-12 , dropout=0.1 , mem_len=512 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0' )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})' )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.' , FutureWarning , )
            use_mems_eval = kwargs['use_cache']
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings( self ):
        '''simple docstring'''
        logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value ):
        '''simple docstring'''
        raise NotImplementedError(
            F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
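# Usage sketch (illustrative addition, not part of the original file; assumes the
# class above is the XLNet config, obfuscated here as `UpperCamelCase__`). The
# `attribute_map` lets the generic names read through to the XLNet-specific ones:
#
#     config = UpperCamelCase__(vocab_size=1000, d_model=64, n_layer=2, n_head=4)
#     assert config.hidden_size == config.d_model == 64
#     assert config.max_position_embeddings == -1  # no sequence length limit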
| 370 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class UpperCamelCase__ ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens = []
    suffix_tokens = []
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang( self ):
        '''simple docstring'''
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ):
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = src_lang
_lowerCAmelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ )
    def _switch_to_input_mode( self ):
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ):
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens( self , lang ):
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
return (out_vocab_file,)
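# Usage sketch (illustrative addition; assumes the class above is the fast NLLB
# tokenizer, obfuscated here as `UpperCamelCase__`). The language codes select
# the special tokens wrapped around each sequence:
#
#     tok = UpperCamelCase__.from_pretrained("facebook/nllb-200-distilled-600M",
#                                            src_lang="eng_Latn", tgt_lang="fra_Latn")
#     batch = tok("UN Chief says there is no military solution in Syria",
#                 text_target="Le chef de l'ONU dit qu'il n'y a pas de solution militaire en Syrie",
#                 return_tensors="pt")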
| 25 | 0 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class UpperCamelCase__ ( PreTrainedModel , BackboneMixin ):
    """simple docstring"""
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__( self , config , **kwargs ):
        '''simple docstring'''
        requires_backends(self , 'timm' )
        super().__init__(config )
        self.config = config
        if config.backbone is None:
            raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )
        if config.backbone not in timm.list_models():
            raise ValueError(F'backbone {config.backbone} is not supported by timm.' )
        if hasattr(config , 'out_features' ) and config.out_features is not None:
            raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )
        pretrained = getattr(config , 'use_pretrained_backbone' , None )
        if pretrained is None:
            raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config , 'out_indices' , None ) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone , pretrained=pretrained , features_only=config.features_only , in_chans=config.num_channels , out_indices=out_indices , **kwargs , )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer['module']: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(config )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , *model_args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['vision', 'timm'] )
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop('config' , TimmBackboneConfig() )
        use_timm = kwargs.pop('use_timm_backbone' , True )
        if not use_timm:
            raise ValueError('use_timm_backbone must be True for timm backbones' )
        num_channels = kwargs.pop('num_channels' , config.num_channels )
        features_only = kwargs.pop('features_only' , config.features_only )
        use_pretrained_backbone = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone )
        out_indices = kwargs.pop('out_indices' , config.out_indices )
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path , num_channels=num_channels , features_only=features_only , use_pretrained_backbone=use_pretrained_backbone , out_indices=out_indices , )
        return super()._from_config(config , **kwargs )
    def _init_weights( self , module ):
        '''simple docstring'''
        pass
    def forward( self , pixel_values , output_attentions=None , output_hidden_states=None , return_dict=None , **kwargs ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError('Cannot output attentions for timm backbones at the moment' )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values , **kwargs )
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices )
        else:
            feature_maps = self._backbone(pixel_values , **kwargs )
            hidden_states = None
        feature_maps = tuple(feature_maps )
        hidden_states = tuple(hidden_states ) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps , hidden_states=hidden_states , attentions=None )
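# Usage sketch (illustrative addition; the config arguments below are assumptions,
# not taken from the original file). The class above wraps a timm model as a
# backbone, returning one feature map per requested stage:
#
#     config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
#     backbone = UpperCamelCase__(config)
#     outputs = backbone(torch.randn(1, 3, 224, 224))
#     feature_maps = outputs.feature_maps  # tuple of 4 tensors, one per stage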
| 371 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCAmelCase : List[str] = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def test_custom_files_are_present (transformers_path ):
"""simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("""transformers""")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
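# Usage sketch (illustrative addition; the script path is an assumption): run from
# the repository root after building the package,
#
#     $ python utils/check_build.py              # checks build/lib/transformers
#     $ python utils/check_build.py --check_lib  # checks the installed package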
| 25 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
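# With `_import_structure` above, importing this package stays cheap: a submodule
# is only loaded when one of its attributes is first accessed, e.g. (illustrative)
#
#     from transformers.models.convbert import ConvBertModel  # triggers the lazy import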
| 350 |
'''simple docstring'''
def lowercase (input_string ):
    """simple docstring"""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ''
    output_string = ''
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string ) )]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string ) ):
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this string end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1 # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
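# Worked example (illustrative addition): for input "cbbd" the augmented string is
# "c|b|b|d"; the widest palindrome is centered on the '|' between the two b's
# (length[3] = 5), which maps back to the substring "bb".
#
#     assert lowercase("cbbd") == "bb"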
| 25 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """simple docstring"""
    origin: torch.Tensor # [batch_size x 3]
    x: torch.Tensor # [batch_size x 3]
    y: torch.Tensor # [batch_size x 3]
    z: torch.Tensor # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__( self ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def resolution( self ):
        '''simple docstring'''
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
    def fov( self ):
        '''simple docstring'''
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
    def get_image_coords( self ):
        '''simple docstring'''
        pixel_indices = torch.arange(self.height * self.width )
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode='trunc' ),
            ] , axis=1 , )
        return coords
    @property
    def camera_rays( self ):
        '''simple docstring'''
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape ) )
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        rays = self.get_camera_rays(coords )
        rays = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
    def get_camera_rays( self , coords ):
        '''simple docstring'''
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size , -1 , 2 )
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2 )
        fracs = fracs.view(batch_size , -1 , 2 )
        directions = (
            self.z.view(batch_size , 1 , 3 )
            + self.x.view(batch_size , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(batch_size , 1 , 3 ) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1 , keepdim=True )
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(batch_size , *shape , 2 , 3 )
    def resize_image( self , width , height ):
        '''simple docstring'''
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=width , height=height , x_fov=self.x_fov , y_fov=self.y_fov , )
def lowercase (size ):
    """simple docstring"""
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
        z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        origin = -z * 4
        x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        y = np.cross(z , x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) , )
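# Usage sketch (illustrative addition): the helper above places 20 cameras on a
# circle around the origin, all looking inward; its argument is the square image
# resolution.
#
#     cameras = lowercase(64)
#     rays = cameras.camera_rays  # shape [1, 20 * 64 * 64, 2, 3]: origins and directions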
| 351 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass( KwargsHandler ):
    """simple docstring"""
    a : int = 0
    b : bool = False
    c : float = 3.0
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def a ( self ):
'''simple docstring'''
        scaler_handler = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
def a ( self ):
'''simple docstring'''
        cmd = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(1_00, 2_00)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = """"""
    observed_bucket_cap_map = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 25 | 0 |
'''simple docstring'''
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase__ ( PipelineTool ):
    """simple docstring"""
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup( self ):
        '''simple docstring'''
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail' ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
    def encode( self , text , labels ):
        '''simple docstring'''
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [F'This example is {label}' for label in labels] , return_tensors='pt' , padding='max_length' , )
    def decode( self , outputs ):
        '''simple docstring'''
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
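# Usage sketch (illustrative addition; assumes the class above is the zero-shot
# text-classification tool, obfuscated here as `UpperCamelCase__`):
#
#     classifier = UpperCamelCase__()
#     label = classifier("This movie was fantastic", ["positive", "negative"])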
| 352 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=100 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=249 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=128 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.0006 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1E-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=5_0256 , eos_token_id=5_0256 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 25 | 0 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def lowercase (_A ):
"""simple docstring"""
return EnvironmentCommand()
class EnvironmentCommand( BaseDiffusersCLICommand ):
"""simple docstring"""
    @staticmethod
    def register_subcommand( parser ):
        '''simple docstring'''
        download_parser = parser.add_parser('env' )
        download_parser.set_defaults(func=lowercase )
    def run( self ):
        '''simple docstring'''
        hub_version = huggingface_hub.__version__
        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': F'{pt_version} ({pt_cuda_available})',
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
        print(self.format_dict(info ) )
return info
    @staticmethod
    def format_dict( d ):
        '''simple docstring'''
return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 353 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_00_04
RO_CODE = 25_00_20
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        '''simple docstring'''
        token = '<s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(vocab_keys ) , 1054 )
    def test_vocab_size( self ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def a ( self ):
'''simple docstring'''
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=True )
_lowerCAmelCase : Any = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def a ( self ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list = [(self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})]
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
_lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=True
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=False
_lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
_lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ = "facebook/mbart-large-50-one-to-many-mmt"
__magic_name__ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
__magic_name__ = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
__magic_name__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
    @classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        cls.tokenizer : MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
        cls.pad_token_id = 1
        return cls
def a ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertIn(snake_case__ , self.tokenizer.all_special_ids )
_lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
_lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
_lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertNotIn(self.tokenizer.eos_token , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , snake_case__ )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0]
self.assertEqual(ids[0] , snake_case__ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] )
def a ( self ):
'''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' )
_lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
_lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' )
_lowerCAmelCase : str = self.tokenizer(
text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' )
_lowerCAmelCase : List[Any] = targets['input_ids']
_lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(snake_case__ ) , {
# en_XX, A, test, EOS
'input_ids': [[25_0004, 62, 3034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_0001,
} , )
| 25 | 0 |
'''simple docstring'''
def power (base , exponent ):
    """simple docstring"""
    return base * power(base , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
    print("""Raise base to the power of exponent using recursion...""")
    base = int(input("""Enter the base: """).strip())
    exponent = int(input("""Enter the exponent: """).strip())
    result = power(base, abs(exponent))
    if exponent < 0: # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(F'''{base} to the power of {exponent} is {result}''')
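# Worked example (illustrative addition): power(3, 4) evaluates 3 * 3 * 3 * 3 = 81;
# negative exponents are handled by the `1 / result` branch above.
#
#     assert power(3, 4) == 81
#     assert 1 / power(2, abs(-3)) == 0.125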
| 354 |
'''simple docstring'''
from math import isqrt
def is_prime (number ):
    """simple docstring"""
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )
def solution (max_prime = 1_0**6 ):
    """simple docstring"""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
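# Worked example (illustrative addition): below 100 the candidates are
# 7, 19, 37, 61, 91; 91 = 7 * 13 is composite, so four primes are counted.
#
#     assert solution(100) == 4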
| 25 | 0 |
'''simple docstring'''
def miller_rabin (n , allow_probable = False ):
"""simple docstring"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
'Warning: upper bound of deterministic test is exceeded. '
'Pass allow_probable=True to allow probabilistic test. '
'A return value of True indicates a probable prime.' )
# array bounds provided by analysis
    bounds = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
    primes = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
    for idx, _p in enumerate(bounds , 1 ):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def test_miller_rabin ():
"""simple docstring"""
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
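# Usage sketch (illustrative addition): the test is deterministic for n below the
# last bound above; beyond it, pass allow_probable=True for a probabilistic answer.
#
#     assert miller_rabin(97) and not miller_rabin(91)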
| 355 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class UpperCamelCase__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=5_0267 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=100 , prompt_mid_dim=800 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                'The config can simply be saved and uploaded again to be fixed.' )
| 25 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Dict = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class UpperCamelCase__ ( PretrainedConfig ):
"""simple docstring"""
__magic_name__ = """megatron-bert"""
    def __init__( self , vocab_size=2_9056 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
'''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : List[str] = hidden_size
_lowerCAmelCase : List[str] = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : Dict = max_position_embeddings
_lowerCAmelCase : str = type_vocab_size
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : Dict = layer_norm_eps
_lowerCAmelCase : Optional[Any] = position_embedding_type
_lowerCAmelCase : Dict = use_cache
| 356 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
LlamaTokenizerFast = None
lowerCAmelCase : Optional[int] = {
"""7B""": 1_10_08,
"""13B""": 1_38_24,
"""30B""": 1_79_20,
"""65B""": 2_20_16,
"""70B""": 2_86_72,
}
NUM_SHARDS = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def compute_intermediate_size (n , ffn_dim_multiplier=1 , multiple_of=2_5_6 ):
"""simple docstring"""
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
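# Quick sanity check (illustrative comment, not part of the original script): with
# the defaults ffn_dim_multiplier=1 and multiple_of=2_5_6, a 7B checkpoint with
# dim=4096 gives int(8 * 4096 / 3) = 10922, rounded up to the next multiple of 256:
#   compute_intermediate_size(4096)  # -> 1_10_08, the 7B entry in the size table above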
def read_json (path ):
    """simple docstring"""
    with open(path , 'r' ) as f:
        return json.load(f )
def write_json (text , path ):
    """simple docstring"""
    with open(path , 'w' ) as f:
        json.dump(text , f )
def write_model (model_path , input_base_path , model_size , safe_serialization=True ):
"""simple docstring"""
    os.makedirs(model_path , exist_ok=True )
    tmp_model_path = os.path.join(model_path , 'tmp' )
    os.makedirs(tmp_model_path , exist_ok=True )
    params = read_json(os.path.join(input_base_path , 'params.json' ) )
    num_shards = NUM_SHARDS[model_size]
    n_layers = params['n_layers']
    n_heads = params['n_heads']
    n_heads_per_shard = n_heads // num_shards
    dim = params['dim']
    dims_per_head = dim // n_heads
    base = 10_000.0
    inv_freq = 1.0 / (base ** (torch.arange(0 , dims_per_head , 2 ).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params['n_kv_heads']  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim
# permute for sliced rotary
    def permute(w , n_heads=n_heads , dim1=dim , dim2=dim ):
        return w.view(n_heads , dim1 // n_heads // 2 , 2 , dim2 ).transpose(1 , 2 ).reshape(dim1 , dim2 )
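    # Shape walk-through (illustrative): for a 7B checkpoint n_heads=32 and dim=4096,
    # so a (4096, 4096) wq/wk weight is viewed as (32, 64, 2, 4096), transpose(1, 2)
    # swaps the two halves of each rotary pair, and the reshape flattens back to
    # (4096, 4096). This turns the interleaved rotary layout of the original
    # checkpoints into the sliced layout expected by the converted attention weights.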
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
        loaded = [
            torch.load(os.path.join(input_base_path , f'consolidated.{i:02d}.pth' ) , map_location='cpu' )
            for i in range(num_shards )
        ]
    param_count = 0
    index_dict = {'weight_map': {}}
    for layer_i in range(n_layers ):
        filename = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
            state_dict = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
            state_dict[f'model.layers.{layer_i}.self_attn.q_proj.weight'] = permute(
                torch.cat(
                    [
                        loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(n_heads_per_shard , dims_per_head , dim )
                        for i in range(num_shards )
                    ] , dim=0 , ).reshape(dim , dim ) )
            state_dict[f'model.layers.{layer_i}.self_attn.k_proj.weight'] = permute(
                torch.cat(
                    [
                        loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
                            num_local_key_value_heads , dims_per_head , dim )
                        for i in range(num_shards )
                    ] , dim=0 , ).reshape(key_value_dim , dim ) , num_key_value_heads , key_value_dim , dim , )
            state_dict[f'model.layers.{layer_i}.self_attn.v_proj.weight'] = torch.cat(
                [
                    loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
                        num_local_key_value_heads , dims_per_head , dim )
                    for i in range(num_shards )
                ] , dim=0 , ).reshape(key_value_dim , dim )
            state_dict[f'model.layers.{layer_i}.self_attn.o_proj.weight'] = torch.cat(
                [loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(num_shards )] , dim=1 )
            state_dict[f'model.layers.{layer_i}.mlp.gate_proj.weight'] = torch.cat(
                [loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(num_shards )] , dim=0 )
            state_dict[f'model.layers.{layer_i}.mlp.down_proj.weight'] = torch.cat(
                [loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(num_shards )] , dim=1 )
            state_dict[f'model.layers.{layer_i}.mlp.up_proj.weight'] = torch.cat(
                [loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(num_shards )] , dim=0 )
        state_dict[f'model.layers.{layer_i}.self_attn.rotary_emb.inv_freq'] = inv_freq
        for k, v in state_dict.items():
            index_dict['weight_map'][k] = filename
            param_count += v.numel()
        torch.save(state_dict , os.path.join(tmp_model_path , filename ) )
    filename = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
        state_dict = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
        state_dict = {
            'model.norm.weight': loaded[0]['norm.weight'],
            'model.embed_tokens.weight': torch.cat(
                [loaded[i]['tok_embeddings.weight'] for i in range(num_shards )] , dim=1 ),
            'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(num_shards )] , dim=0 ),
        }
    for k, v in state_dict.items():
        index_dict['weight_map'][k] = filename
        param_count += v.numel()
    torch.save(state_dict , os.path.join(tmp_model_path , filename ) )
# Write configs
    index_dict['metadata'] = {'total_size': param_count * 2}
    write_json(index_dict , os.path.join(tmp_model_path , 'pytorch_model.bin.index.json' ) )
    ffn_dim_multiplier = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
    multiple_of = params['multiple_of'] if 'multiple_of' in params else 2_5_6
    config = LlamaConfig(
        hidden_size=dim , intermediate_size=compute_intermediate_size(dim , ffn_dim_multiplier , multiple_of ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=num_key_value_heads , )
    config.save_pretrained(tmp_model_path )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
    model = LlamaForCausalLM.from_pretrained(tmp_model_path , torch_dtype=torch.floataa , low_cpu_mem_usage=True )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
    model.save_pretrained(model_path , safe_serialization=safe_serialization )
    shutil.rmtree(tmp_model_path )
def write_tokenizer (tokenizer_path , input_tokenizer_path ):
    """simple docstring"""
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
    tokenizer = tokenizer_class(input_tokenizer_path )
    tokenizer.save_pretrained(tokenizer_path )
def main ():
    """simple docstring"""
    parser = argparse.ArgumentParser()
parser.add_argument(
'--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
parser.add_argument(
'--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
parser.add_argument(
'--output_dir' , help='Location to write HF model and tokenizer' , )
    parser.add_argument('--safe_serialization' , type=bool , help='Whether or not to save using `safetensors`.' )
    args = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
    spm_path = os.path.join(args.input_dir , 'tokenizer.model' )
    write_tokenizer(args.output_dir , spm_path )
if __name__ == "__main__":
main()
| 25 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class UpperCamelCase__ ( TaskTemplate ):
"""simple docstring"""
__magic_name__ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
__magic_name__ = Features({"text": Value("string" )} )
__magic_name__ = Features({"labels": ClassLabel} )
__magic_name__ = "text"
__magic_name__ = "labels"
    def a ( self , features ):
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(F'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
        return task_template
@property
def a ( self ):
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
| 357 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = True
__magic_name__ = None
__magic_name__ = 1
__magic_name__ = None
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
def a ( self ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(snake_case__ ) for k, v in self.__dict__.items()} )
| 25 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def a ( self ):
'''simple docstring'''
        model = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' )
        features = {
            'input_ids': tf.convert_to_tensor([[0, 2646, 1_0269, 83, 9_9942, 2]] , dtype=tf.intaa ),  # "My dog is cute"
            'attention_mask': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
        }
        output = model(features )['last_hidden_state']
        expected_shape = tf.TensorShape((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
[
[
[0.068_1762, 0.1089_4451, 0.0677_2504],
[-0.0642_3668, 0.0236_6615, 0.0432_9344],
[-0.0605_7295, 0.0997_4135, -0.0007_0584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 358 |
'''simple docstring'''
lowerCAmelCase : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCAmelCase : List[str] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 25 | 0 |
'''simple docstring'''
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class UpperCamelCase__ ( SchedulerMixin , ConfigMixin ):
"""simple docstring"""
__magic_name__ = 1
@register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1E-3 ):
        '''simple docstring'''
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def a ( self , num_inference_steps , device = None ):
        '''simple docstring'''
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def a ( self , score , t , x , generator=None ):
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
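# Minimal usage sketch (illustrative assumption: upstream this class is diffusers'
# ScoreSdeVpScheduler, whose two obfuscated `a` methods correspond to
# `set_timesteps` and `step_pred`). A reverse-SDE sampling loop looks roughly like:
#
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(num_inference_steps=1000, device="cuda")
#   for t in scheduler.timesteps:
#       score = score_model(x, t)  # user-provided score network
#       x, x_mean = scheduler.step_pred(score, t, x, generator=generator)
#   sample = x_mean  # the denoised mean is typically used as the final sample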
| 359 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_resnet'] = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_resnet'] = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_resnet'] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 25 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCamelCase__ :
"""simple docstring"""
    def __init__( self , parent , batch_size=2 , image_size=32 , patch_size=16 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , backbone_out_indices=[0, 1, 2, 3] , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , backbone_featmap_shape=[1, 384, 24, 24] , is_hybrid=True , scope=None , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = parent
_lowerCAmelCase : List[Any] = batch_size
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : Dict = patch_size
_lowerCAmelCase : str = num_channels
_lowerCAmelCase : Tuple = is_training
_lowerCAmelCase : Optional[Any] = use_labels
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : str = backbone_out_indices
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : int = backbone_featmap_shape
_lowerCAmelCase : Union[str, Any] = scope
_lowerCAmelCase : int = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
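        # Worked example (illustrative): with the defaults above, image_size=32 and
        # patch_size=16 give (32 // 16) ** 2 = 4 patches, so the expected sequence
        # length is 4 + 1 = 5 once the [CLS] token is prepended.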
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__A , backbone_featmap_shape=self.backbone_featmap_shape , )
def a ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = DPTModel(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase : int = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.num_labels
_lowerCAmelCase : List[Any] = DPTForDepthEstimation(__A )
model.to(__A )
model.eval()
_lowerCAmelCase : Tuple = model(__A )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def a ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : Tuple = DPTForSemanticSegmentation(__A )
model.to(__A )
model.eval()
_lowerCAmelCase : Dict = model(__A , labels=__A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.prepare_config_and_inputs()
_lowerCAmelCase : Union[str, Any] = config_and_inputs
_lowerCAmelCase : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__magic_name__ = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = DPTModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 )
def a ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DPT does not use inputs_embeds' )
def a ( self ):
'''simple docstring'''
pass
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[Any] = model_class(__A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , nn.Linear ) )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = model_class(__A )
_lowerCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
_lowerCAmelCase : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __A )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__A )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
def a ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : str = True
if model_class in get_values(__A ):
continue
_lowerCAmelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.train()
_lowerCAmelCase : str = self._prepare_for_class(__A , __A , return_labels=__A )
_lowerCAmelCase : Union[str, Any] = model(**__A ).loss
loss.backward()
def a ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : int = False
_lowerCAmelCase : int = True
if model_class in get_values(__A ) or not model_class.supports_gradient_checkpointing:
continue
_lowerCAmelCase : Dict = model_class(__A )
model.to(__A )
model.gradient_checkpointing_enable()
model.train()
_lowerCAmelCase : List[str] = self._prepare_for_class(__A , __A , return_labels=__A )
_lowerCAmelCase : Any = model(**__A ).loss
loss.backward()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Optional[Any] = _config_zero_init(__A )
for model_class in self.all_model_classes:
_lowerCAmelCase : Dict = model_class(config=__A )
# Skip the check for the backbone
_lowerCAmelCase : Dict = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_lowerCAmelCase : Optional[Any] = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def a ( self ):
'''simple docstring'''
pass
@slow
def a ( self ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_lowerCAmelCase : Optional[int] = DPTModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : int = '''add'''
with self.assertRaises(__A ):
_lowerCAmelCase : Dict = DPTForDepthEstimation(__A )
def prepare_img ():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
@slow
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' )
_lowerCAmelCase : Tuple = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(__A )
_lowerCAmelCase : List[Any] = prepare_img()
_lowerCAmelCase : Union[str, Any] = image_processor(images=__A , return_tensors='pt' ).to(__A )
# forward pass
with torch.no_grad():
_lowerCAmelCase : int = model(**__A )
_lowerCAmelCase : int = outputs.predicted_depth
# verify the predicted depth
_lowerCAmelCase : Tuple = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __A )
_lowerCAmelCase : Dict = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__A )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __A , atol=1E-4 ) )
| 360 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase__ ( BackboneConfigMixin , PretrainedConfig ):
"""simple docstring"""
__magic_name__ = "nat"
__magic_name__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
'''simple docstring'''
        super().__init__(**kwargs )
_lowerCAmelCase : Union[str, Any] = patch_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : Any = depths
        _lowerCAmelCase : Dict = len(depths )
_lowerCAmelCase : str = num_heads
_lowerCAmelCase : Dict = kernel_size
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : int = qkv_bias
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Dict = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        _lowerCAmelCase : Any = layer_scale_init_value
        self.stage_names = ['stem'] + [F'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        _lowerCAmelCase , _lowerCAmelCase : str = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
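        # Worked example (illustrative): with the defaults embed_dim=64 and
        # depths=[3, 4, 6, 5], hidden_size = 64 * 2 ** (4 - 1) = 512, i.e. the
        # channel count after the last of the four stages.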
| 25 | 0 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def get_new_h_w (h , w , scale_factor=8 ):
    """simple docstring"""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
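# Worked example (illustrative): with the default scale_factor=8, a 768x768 request
# maps to 768 // 64 = 12 blocks per side, i.e. a 96x96 latent:
#   get_new_h_w(768, 768)  # -> (96, 96)
# A 769x769 request is padded up to the next multiple of 64 per side: -> (104, 104).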
class UpperCamelCase__ ( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self , text_encoder , tokenizer , unet , scheduler , movq , ):
'''simple docstring'''
super().__init__()
self.register_modules(
            text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def a ( self , shape , dtype , device , generator , latents , scheduler ):
        '''simple docstring'''
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def a ( self , prompt , device , num_images_per_prompt , do_classifier_free_guidance , negative_prompt=None , ):
'''simple docstring'''
_lowerCAmelCase : Any = len(__a ) if isinstance(__a , __a ) else 1
# get prompt text embeddings
_lowerCAmelCase : Dict = self.tokenizer(
__a , padding='max_length' , truncation=__a , max_length=77 , return_attention_mask=__a , add_special_tokens=__a , return_tensors='pt' , )
_lowerCAmelCase : Optional[Any] = text_inputs.input_ids
_lowerCAmelCase : Optional[int] = self.tokenizer(__a , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(__a , __a ):
_lowerCAmelCase : Tuple = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
_lowerCAmelCase : Optional[Any] = text_input_ids.to(__a )
_lowerCAmelCase : str = text_inputs.attention_mask.to(__a )
_lowerCAmelCase , _lowerCAmelCase : Dict = self.text_encoder(
input_ids=__a , attention_mask=__a )
_lowerCAmelCase : Union[str, Any] = prompt_embeds.repeat_interleave(__a , dim=0 )
_lowerCAmelCase : List[str] = text_encoder_hidden_states.repeat_interleave(__a , dim=0 )
_lowerCAmelCase : str = text_mask.repeat_interleave(__a , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : Optional[int] = 42
if negative_prompt is None:
_lowerCAmelCase : Union[str, Any] = [''] * batch_size
elif type(__a ) is not type(__a ):
raise TypeError(
F'`negative_prompt` should be the same type to `prompt`, but got {type(__a )} !='
F' {type(__a )}.' )
elif isinstance(__a , __a ):
_lowerCAmelCase : Tuple = [negative_prompt]
elif batch_size != len(__a ):
raise ValueError(
F'`negative_prompt`: {negative_prompt} has batch size {len(__a )}, but `prompt`:'
F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
' the batch size of `prompt`.' )
else:
_lowerCAmelCase : Optional[int] = negative_prompt
_lowerCAmelCase : Dict = self.tokenizer(
__a , padding='max_length' , max_length=77 , truncation=__a , return_attention_mask=__a , add_special_tokens=__a , return_tensors='pt' , )
_lowerCAmelCase : Optional[Any] = uncond_input.input_ids.to(__a )
_lowerCAmelCase : Dict = uncond_input.attention_mask.to(__a )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.text_encoder(
input_ids=__a , attention_mask=__a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase : int = negative_prompt_embeds.shape[1]
_lowerCAmelCase : List[Any] = negative_prompt_embeds.repeat(1 , __a )
_lowerCAmelCase : Optional[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __a )
_lowerCAmelCase : Dict = uncond_text_encoder_hidden_states.shape[1]
_lowerCAmelCase : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , __a , 1 )
_lowerCAmelCase : List[str] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , __a , -1 )
_lowerCAmelCase : Tuple = uncond_text_mask.repeat_interleave(__a , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase : List[Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
_lowerCAmelCase : Dict = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
_lowerCAmelCase : Optional[int] = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
    def a ( self , gpu_id=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
_lowerCAmelCase : Dict = torch.device(F'cuda:{gpu_id}' )
_lowerCAmelCase : List[str] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__a , __a )
    def a ( self , gpu_id=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
_lowerCAmelCase : Dict = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=__a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : Union[str, Any] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : List[str] = cpu_offload_with_hook(__a , __a , prev_module_hook=__a )
if self.safety_checker is not None:
_lowerCAmelCase , _lowerCAmelCase : Tuple = cpu_offload_with_hook(self.safety_checker , __a , prev_module_hook=__a )
# We'll offload the last model manually.
_lowerCAmelCase : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def a ( self ):
'''simple docstring'''
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__a , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , prompt , image_embeds , negative_image_embeds , negative_prompt = None , height = 512 , width = 512 , num_inference_steps = 100 , guidance_scale = 4.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , ):
'''simple docstring'''
if isinstance(__a , __a ):
_lowerCAmelCase : Optional[Any] = 1
elif isinstance(__a , __a ):
_lowerCAmelCase : Union[str, Any] = len(__a )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(__a )}' )
_lowerCAmelCase : Tuple = self._execution_device
_lowerCAmelCase : Dict = batch_size * num_images_per_prompt
_lowerCAmelCase : List[str] = guidance_scale > 1.0
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self._encode_prompt(
__a , __a , __a , __a , __a )
if isinstance(__a , __a ):
_lowerCAmelCase : Tuple = torch.cat(__a , dim=0 )
if isinstance(__a , __a ):
_lowerCAmelCase : Dict = torch.cat(__a , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : Dict = image_embeds.repeat_interleave(__a , dim=0 )
_lowerCAmelCase : Dict = negative_image_embeds.repeat_interleave(__a , dim=0 )
_lowerCAmelCase : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=__a )
self.scheduler.set_timesteps(__a , device=__a )
_lowerCAmelCase : Optional[int] = self.scheduler.timesteps
_lowerCAmelCase : Union[str, Any] = self.unet.config.in_channels
_lowerCAmelCase , _lowerCAmelCase : Any = get_new_h_w(__a , __a , self.movq_scale_factor )
# create initial latent
_lowerCAmelCase : Dict = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , __a , __a , __a , self.scheduler , )
for i, t in enumerate(self.progress_bar(__a ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : Tuple = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
_lowerCAmelCase : Union[str, Any] = self.unet(
sample=__a , timestep=__a , encoder_hidden_states=__a , added_cond_kwargs=__a , return_dict=__a , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase : int = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase : Any = variance_pred.chunk(2 )
_lowerCAmelCase : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : Dict = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : Any = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : Optional[Any] = self.scheduler.step(
__a , __a , __a , generator=__a , ).prev_sample
# post-processing
_lowerCAmelCase : List[Any] = self.movq.decode(__a , force_not_quantize=__a )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
_lowerCAmelCase : int = image * 0.5 + 0.5
_lowerCAmelCase : List[str] = image.clamp(0 , 1 )
_lowerCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : Tuple = self.numpy_to_pil(__a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a )
| 361 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""roberta-base""": 5_12,
"""roberta-large""": 5_12,
"""roberta-large-mnli""": 5_12,
"""distilroberta-base""": 5_12,
"""roberta-base-openai-detector""": 5_12,
"""roberta-large-openai-detector""": 5_12,
}
class UpperCamelCase__ ( PreTrainedTokenizerFast ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = RobertaTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
def a ( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
    def a ( self , value ):
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def a ( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def a ( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def a ( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def a ( self , token_ids_a , token_ids_b=None ):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
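    # Illustrative example: for a single sequence this returns <s> A </s>, and for a
    # pair <s> A </s> </s> B </s>, i.e. RoBERTa's double-separator convention.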
    def a ( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
| 25 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 5_12,
"""bert-large-uncased""": 5_12,
"""bert-base-cased""": 5_12,
"""bert-large-cased""": 5_12,
"""bert-base-multilingual-uncased""": 5_12,
"""bert-base-multilingual-cased""": 5_12,
"""bert-base-chinese""": 5_12,
"""bert-base-german-cased""": 5_12,
"""bert-large-uncased-whole-word-masking""": 5_12,
"""bert-large-cased-whole-word-masking""": 5_12,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_12,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_12,
"""bert-base-cased-finetuned-mrpc""": 5_12,
"""bert-base-german-dbmdz-cased""": 5_12,
"""bert-base-german-dbmdz-uncased""": 5_12,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_12,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_12,
"""wietsedv/bert-base-dutch-cased""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    # PreTrainedTokenizerFast, BertTokenizer, json and `normalizers` (from the
    # `tokenizers` package) are imported at the top of this file.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if the saved state disagrees with the
        # arguments passed in.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
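# --- Added usage sketch (not in the original file; requires network access to
# the Hub, so it is left commented out). The token ids shown are the usual
# ones for bert-base-uncased:
#
#   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   tokenizer("Hello world")["input_ids"]   # [101, 7592, 2088, 102]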
| 362 |
'''simple docstring'''
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence):
    """One-pass, in-place three-way partition of a sequence that contains only
    the values 0, 1 and 2 (Dijkstra's Dutch national flag problem)."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
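# --- Added example (not in the original file): a quick demonstration of the
# one-pass three-way partition above.
assert dutch_national_flag_sort([2, 0, 1, 2, 0]) == [0, 0, 1, 2, 2]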
| 25 | 0 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
        prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 363 |
'''simple docstring'''
def solution():
    """Returns the number of Sundays that fell on the first of the month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000), Project Euler 19."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # leap year: February has 29 days
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
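# --- Added cross-check (not part of the original solution): the same count
# computed with the standard library's calendar arithmetic.
def solution_datetime():
    import datetime

    return sum(
        datetime.date(year, month, 1).weekday() == 6  # Monday == 0, so Sunday == 6
        for year in range(1901, 2001)
        for month in range(1, 13)
    )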
| 25 | 0 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, mask_generator, examples):
        pass
    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9612},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0210},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
] , )
| 364 |
'''simple docstring'''
def solution(limit=1_000_000):
    """Sum of Euler's totient phi(n) for 2 <= n <= limit, i.e. the number of
    reduced proper fractions with denominator <= limit (Project Euler 72)."""
    # Sieve of Eratosthenes, odd numbers plus 2
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F'''{solution() = }''')
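# --- Added sanity check (not in the original file): the Project Euler 72
# statement gives 21 reduced proper fractions for d <= 8.
assert solution(8) == 21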
| 25 | 0 |
'''simple docstring'''
from math import log2


def get_index_of_rightmost_set_bit(number):
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    # number & -number isolates the lowest set bit; log2 gives its index.
    return 0 if number == 0 else int(log2(number & -number))
if __name__ == "__main__":
import doctest
doctest.testmod()
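# --- Added examples (not in the original file):
assert get_index_of_rightmost_set_bit(36) == 2  # 36 == 0b100100
assert get_index_of_rightmost_set_bit(8) == 3   # 8 == 0b1000
assert get_index_of_rightmost_set_bit(0) == 0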
| 365 |
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase : Tuple = """src/transformers"""
# Pattern that looks at the indentation in a line.
lowerCAmelCase : str = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase : str = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""")
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : int = _re_indent.search(_A )
return "" if search is None else search.groups()[0]
def lowercase (_A , _A="" , _A=None , _A=None ):
"""simple docstring"""
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_A ):
index += 1
_lowerCAmelCase : Dict = ['\n'.join(lines[:index] )]
else:
_lowerCAmelCase : str = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : List[Any] = [lines[index]]
index += 1
while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_A ) )
if index < len(_A ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Union[str, Any] = []
else:
blocks.append('\n'.join(_A ) )
_lowerCAmelCase : List[str] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_A ) > 0:
blocks.append('\n'.join(_A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_A ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def lowercase (_A ):
"""simple docstring"""
def _inner(_A ):
return key(_A ).lower().replace('_' , '' )
return _inner
def lowercase (_A , _A=None ):
"""simple docstring"""
def noop(_A ):
return x
if key is None:
_lowerCAmelCase : List[Any] = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : List[Any] = [obj for obj in objects if key(_A ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : Tuple = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : List[str] = [obj for obj in objects if not key(_A )[0].isupper()]
_lowerCAmelCase : Dict = ignore_underscore(_A )
return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A )
def lowercase (_A ):
"""simple docstring"""
def _replace(_A ):
_lowerCAmelCase : Dict = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_lowerCAmelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : int = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]"
_lowerCAmelCase : Tuple = import_statement.split('\n' )
if len(_A ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1
_lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowerCAmelCase : Dict = sort_objects(_A , key=lambda _A : x[1] )
_lowerCAmelCase : Tuple = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_A ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : Tuple = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : List[str] = keys[:-1]
_lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(_A )] )
return "\n".join(_A )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A )
return import_statement
def lowercase (_A , _A=True ):
"""simple docstring"""
with open(_A , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase : Tuple = split_code_in_indented_blocks(
_A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_A ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCAmelCase : Tuple = main_blocks[block_idx]
_lowerCAmelCase : int = block.split('\n' )
# Get to the start of the imports.
_lowerCAmelCase : Tuple = 0
while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase : Dict = len(_A )
else:
line_idx += 1
if line_idx >= len(_A ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] )
_lowerCAmelCase : Tuple = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
_lowerCAmelCase : List[Any] = split_code_in_indented_blocks(_A , indent_level=_A )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase : Optional[int] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None]
_lowerCAmelCase : Optional[int] = [x[0] for x in sorted(_A , key=lambda _A : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCAmelCase : int = 0
_lowerCAmelCase : Optional[Any] = []
for i in range(len(_A ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(_A )
count += 1
# And we put our main block back together with its first and last line.
_lowerCAmelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(_A ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(_A , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(_A ) )
def lowercase (_A=True ):
"""simple docstring"""
_lowerCAmelCase : int = []
for root, _, files in os.walk(_A ):
if "__init__.py" in files:
_lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A )
if result:
_lowerCAmelCase : Optional[int] = [os.path.join(_A , '__init__.py' )]
if len(_A ) > 0:
raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' )
if __name__ == "__main__":
lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowerCAmelCase : List[str] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 25 | 0 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowerCAmelCase : int = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=_A , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
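# --- Added invocation sketch (not in the original file; the TPU name and zone
# are placeholders, the flags are the ones defined above):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "echo hello from the pod" --debug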
| 366 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0  # (mask region as in the upstream test)
        prompt = "a hat"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 25 | 0 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
            assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
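# --- Added usage sketch (not in the original tests): the same FileLock API
# exercised above, as it would appear in ordinary application code.
#
#   lock = FileLock("/tmp/demo.lock")
#   with lock.acquire(timeout=1):
#       ...  # critical section; a second holder of the same path blocks here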
| 367 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncating toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
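# --- Added examples (not in the original file):
assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9   # (2 + 1) * 3
assert evaluate_postfix(["4", "13", "5", "/", "+"]) == 6  # 4 + 13 // 5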
| 25 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
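# --- Added note (not in the original file): with the _LazyModule pattern the
# torch-backed symbols are only imported on first attribute access, e.g.
#
#   from transformers import GPTNeoXJapaneseConfig   # cheap, config only
#   from transformers import GPTNeoXJapaneseModel    # triggers the torch import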
| 368 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self):
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self):
        return 1e-4
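# --- Added usage sketch (not in the original file): configs are plain
# constructors, so a variant such as the 1.4x multiplier checkpoint maps to:
#
#   config = MobileNetV2Config(depth_multiplier=1.4, image_size=224)
#   assert config.depth_multiplier == 1.4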
| 25 | 0 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
lowerCAmelCase : List[Any] = ["""text""", """image""", """audio"""]
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((5_1_2, 5_1_2) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_0_0_0 ) )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
inputs.append(create_inputs(__SCREAMING_SNAKE_CASE ) )
else:
raise ValueError(f'Invalid type requested: {input_type}' )
return inputs
def output_types(outputs):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
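# --- Added usage sketch (hypothetical class name; not in the original file):
# a concrete test case combines the mixin with a tool instance, e.g.
#
#   class TranslationToolTester(unittest.TestCase, ToolTesterMixin):
#       def setUp(self):
#           self.tool = load_tool("translation")  # transformers.tools.load_tool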
| 369 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
    def test_framework_provided(self):
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint: no weights found, so an error is raised
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
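# --- Added usage sketch (not in the original tests): outside of mocks, the
# resolution the tests above assert looks like
#
#   FeaturesManager.determine_framework("bert-base-cased")
#   # -> "pt" when torch is installed, "tf" when only TensorFlow is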
| 25 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
_lowerCAmelCase : Dict = legacy_behaviour
super().__init__(
vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : int = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_lowerCAmelCase : Any = {
lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn'
_lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang )
_lowerCAmelCase : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowerCAmelCase : Optional[Any] = src_lang
_lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
_lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ )
_lowerCAmelCase : Optional[Any] = tgt_lang_id
return inputs
def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = src_lang
_lowerCAmelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ )
def a ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def a ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : Dict = []
_lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : int = [self.eos_token_id]
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : int = []
_lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : List[str] = [self.eos_token_id]
_lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_lowerCAmelCase : Union[str, Any] = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
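# Illustrative sketch (added by the editor, not part of the sample above): the
# set_src/tgt_lang_special_tokens methods place the language code either before
# the sequence (default) or after the EOS token (legacy NLLB behaviour). A
# minimal pure-Python sketch of the two layouts; the function name and the
# example ids are hypothetical, not the library's API.
def wrap_with_lang_code(token_ids, lang_code_id, eos_id=2, legacy=False):
    # legacy: [tokens..., EOS, lang]; default: [lang, tokens..., EOS]
    prefix, suffix = ([], [eos_id, lang_code_id]) if legacy else ([lang_code_id], [eos_id])
    return prefix + token_ids + suffix
assert wrap_with_lang_code([10, 11], 256047) == [256047, 10, 11, 2]
assert wrap_with_lang_code([10, 11], 256047, legacy=True) == [10, 11, 2, 256047]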
| 25 | 0 |
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class UpperCamelCase__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=A_ , scheduler=A_ )
def __call__( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[Any] = self.unet(A_ , A_ ).sample
_lowerCAmelCase : Optional[int] = self.scheduler.step(A_ , A_ , A_ ).prev_sample
_lowerCAmelCase : Any = scheduler_output - scheduler_output + torch.ones_like(A_ )
return result
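# Illustrative sketch (added for clarity): the test pipeline above runs exactly
# one UNet prediction followed by one scheduler step. The same control flow with
# plain callables, so the call order is visible without the diffusers
# machinery; names here are hypothetical stand-ins.
def one_step(unet, scheduler_step, sample, t=1):
    noise_pred = unet(sample, t)                  # model predicts the noise
    return scheduler_step(noise_pred, t, sample)  # scheduler yields prev sample
assert one_step(lambda x, t: x, lambda n, t, x: x, sample=[1.0, 2.0]) == [1.0, 2.0]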
| 371 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
    """kernels/rwkv/wkv_cuda.cu""",
    """kernels/rwkv/wkv_op.cpp""",
    """kernels/deformable_detr/ms_deform_attn.h""",
    """kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
    """models/graphormer/algos_graphormer.pyx""",
]
def test_custom_files_are_present (transformers_path ):
    """simple docstring"""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("""transformers""")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / """build/lib/transformers"""
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
| 25 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences (_A ):
    """simple docstring"""
    create_state_space_tree(_A , [] , 0 )
def create_state_space_tree (sequence , current_subsequence , index ):
    """simple docstring"""
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
    seq = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
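# Illustrative sketch (added for comparison, not in the original): the recursion
# above decides, at each index, to skip or take the element, giving all 2**n
# subsequences. An equivalent bitmask formulation:
def subsequences_bitmask(seq):
    n = len(seq)
    return [[seq[i] for i in range(n) if mask >> i & 1] for mask in range(1 << n)]
assert sorted(map(tuple, subsequences_bitmask([1, 2]))) == [(), (1,), (1, 2), (2,)]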
| 350 |
'''simple docstring'''
def lowercase (input_string ):
    """simple docstring"""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ''
    output_string = ''
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l , r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string ) )]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string ) ):
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
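# Sanity checks for the repaired Manacher implementation above: `k` only grows
# inside the while loop and `r` moves monotonically right, so the scan is O(n).
assert lowercase("abbbaba") == "abbba"
assert lowercase("forgeeksskeegfor") == "geeksskeeg"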
| 25 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
lowerCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase (_A ):
"""simple docstring"""
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(_A ):
return ext
raise Exception(
f'Unable to determine file format from file extension {path}. '
f'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )
def lowercase (args ):
    """simple docstring"""
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    format = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format
    reader = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
    def __init__( self , nlp , reader ):
        '''simple docstring'''
        self._nlp = nlp
        self._reader = reader
@staticmethod
def a ( snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Any = parser.add_parser('run' , help='Run a pipeline through the CLI' )
run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run' )
run_parser.add_argument('--input' , type=snake_case__ , help='Path to the file to use for inference' )
run_parser.add_argument('--output' , type=snake_case__ , help='Path to the file that will be used post to write results.' )
run_parser.add_argument('--model' , type=snake_case__ , help='Name or path to the model to instantiate.' )
run_parser.add_argument('--config' , type=snake_case__ , help='Name or path to the model\'s config to instantiate.' )
run_parser.add_argument(
'--tokenizer' , type=snake_case__ , help='Name of the tokenizer to use. (default: same as the model name)' )
run_parser.add_argument(
'--column' , type=snake_case__ , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , )
run_parser.add_argument(
'--format' , type=snake_case__ , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , )
run_parser.add_argument(
'--device' , type=snake_case__ , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.' )
run_parser.set_defaults(func=snake_case__ )
def a ( self ):
'''simple docstring'''
        nlp , outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(F'Current pipeline requires output to be in binary format, saving at {binary_path}' )
        else:
            self._reader.save(outputs )
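# Illustrative sketch (added for clarity): the extension-based format dispatch
# used by try_infer_format_from_ext above, with a local stand-in for
# PipelineDataFormat.SUPPORTED_FORMATS:
SUPPORTED = ("csv", "json")
def infer_format(path):
    if not path:
        return "pipe"
    for ext in SUPPORTED:
        if path.endswith(ext):
            return ext
    raise ValueError(f"cannot infer format from {path}")
assert infer_format("data.csv") == "csv" and infer_format("") == "pipe"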
| 351 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = 0
__magic_name__ = False
__magic_name__ = 3.0
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def a ( self ):
'''simple docstring'''
        scaler_handler = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fpaa )
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , snake_case__ )
@require_multi_gpu
def a ( self ):
'''simple docstring'''
        cmd = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(1_00, 2_00)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = """"""
    observed_bucket_cap_map = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
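# Illustrative re-implementation (an assumption matching the assertions in the
# first test above, not accelerate's actual source): to_kwargs() returns only
# the fields whose values differ from their dataclass defaults.
from dataclasses import dataclass as _dc, fields as _fields
@_dc
class _Handler:
    a: int = 0
    b: bool = False
    c: float = 3.0
    def to_kwargs(self):
        return {f.name: getattr(self, f.name) for f in _fields(self) if getattr(self, f.name) != f.default}
assert _Handler(a=2, c=2.25).to_kwargs() == {"a": 2, "c": 2.25}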
| 25 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = 4_2
__magic_name__ = None
__magic_name__ = None
CoinsDistribResult = namedtuple("""CoinsDistribResult""", """moves excess""")
def lowercase (_A ):
"""simple docstring"""
    if _A is None:
return 0
# Validation
    def count_nodes(node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_A ) != count_coins(_A ):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
    def get_distrib(node ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result , excess )
return get_distrib(_A )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
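# Worked example (added for illustration): a root holding 3 coins with two empty
# children needs 2 moves, one coin pushed down each edge. A compact equivalent
# formulation of the same algorithm:
def _min_moves(node):
    moves = 0
    def dfs(n):
        nonlocal moves
        if n is None:
            return 0
        excess = n.data + dfs(n.left) + dfs(n.right) - 1  # coins beyond the one this node keeps
        moves += abs(excess)
        return excess
    dfs(node)
    return moves
from collections import namedtuple as _nt
_Node = _nt("Node", "data left right")
assert _min_moves(_Node(3, _Node(0, None, None), _Node(0, None, None))) == 2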
| 352 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "trajectory_transformer"
__magic_name__ = ["past_key_values"]
__magic_name__ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , snake_case__=100 , snake_case__=5 , snake_case__=1 , snake_case__=1 , snake_case__=249 , snake_case__=6 , snake_case__=17 , snake_case__=25 , snake_case__=4 , snake_case__=4 , snake_case__=128 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0006 , snake_case__=512 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=True , snake_case__=1 , snake_case__=5_0256 , snake_case__=5_0256 , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Any = action_weight
_lowerCAmelCase : Optional[int] = reward_weight
_lowerCAmelCase : Union[str, Any] = value_weight
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Tuple = block_size
_lowerCAmelCase : List[Any] = action_dim
_lowerCAmelCase : List[Any] = observation_dim
_lowerCAmelCase : Union[str, Any] = transition_dim
_lowerCAmelCase : Tuple = learning_rate
_lowerCAmelCase : int = n_layer
_lowerCAmelCase : Any = n_head
_lowerCAmelCase : Tuple = n_embd
_lowerCAmelCase : Optional[Any] = embd_pdrop
_lowerCAmelCase : Union[str, Any] = attn_pdrop
_lowerCAmelCase : Any = resid_pdrop
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : List[Any] = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = kaiming_initializer_range
_lowerCAmelCase : List[Any] = use_cache
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
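# Illustrative sketch (added for clarity; not the PretrainedConfig source): the
# attribute_map above lets generic names like `hidden_size` resolve to the
# model-specific `n_embd`. The aliasing mechanism, reduced to its core idea:
class _AliasedConfig:
    attribute_map = {"hidden_size": "n_embd"}
    def __init__(self, n_embd):
        self.n_embd = n_embd
    def __getattr__(self, name):  # only called when normal lookup fails
        amap = type(self).attribute_map
        if name in amap:
            return getattr(self, amap[name])
        raise AttributeError(name)
assert _AliasedConfig(128).hidden_size == 128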
| 25 | 0 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __get__( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
if obj is None:
return self
if self.fget is None:
raise AttributeError('unreadable attribute' )
_lowerCAmelCase : Optional[int] = '__cached_' + self.fget.__name__
_lowerCAmelCase : List[Any] = getattr(snake_case__ , snake_case__ , snake_case__ )
if cached is None:
_lowerCAmelCase : Any = self.fget(snake_case__ )
setattr(snake_case__ , snake_case__ , snake_case__ )
return cached
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'invalid truth value {val!r}' )
def lowercase (_A ):
"""simple docstring"""
if is_torch_fx_proxy(_A ):
return True
if is_torch_available():
import torch
if isinstance(_A , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(_A , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(_A , (jnp.ndarray, Tracer) ):
return True
return isinstance(_A , np.ndarray )
def lowercase (_A ):
"""simple docstring"""
return isinstance(_A , np.ndarray )
def lowercase (_A ):
"""simple docstring"""
return _is_numpy(_A )
def lowercase (_A ):
"""simple docstring"""
import torch
return isinstance(_A , torch.Tensor )
def lowercase (_A ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch(_A )
def lowercase (_A ):
"""simple docstring"""
import torch
return isinstance(_A , torch.device )
def lowercase (_A ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_device(_A )
def lowercase (_A ):
"""simple docstring"""
import torch
if isinstance(_A , _A ):
if hasattr(_A , _A ):
_lowerCAmelCase : Optional[Any] = getattr(_A , _A )
else:
return False
return isinstance(_A , torch.dtype )
def lowercase (_A ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_dtype(_A )
def lowercase (_A ):
"""simple docstring"""
import tensorflow as tf
return isinstance(_A , tf.Tensor )
def lowercase (_A ):
"""simple docstring"""
return False if not is_tf_available() else _is_tensorflow(_A )
def lowercase (_A ):
"""simple docstring"""
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(_A , 'is_symbolic_tensor' ):
return tf.is_symbolic_tensor(_A )
return type(_A ) == tf.Tensor
def lowercase (_A ):
"""simple docstring"""
return False if not is_tf_available() else _is_tf_symbolic_tensor(_A )
def lowercase (_A ):
"""simple docstring"""
import jax.numpy as jnp # noqa: F811
return isinstance(_A , jnp.ndarray )
def lowercase (_A ):
"""simple docstring"""
return False if not is_flax_available() else _is_jax(_A )
def lowercase (_A ):
"""simple docstring"""
if isinstance(_A , (dict, UserDict) ):
return {k: to_py_obj(_A ) for k, v in obj.items()}
elif isinstance(_A , (list, tuple) ):
return [to_py_obj(_A ) for o in obj]
elif is_tf_tensor(_A ):
return obj.numpy().tolist()
elif is_torch_tensor(_A ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(_A ):
return np.asarray(_A ).tolist()
elif isinstance(_A , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def lowercase (_A ):
"""simple docstring"""
if isinstance(_A , (dict, UserDict) ):
return {k: to_numpy(_A ) for k, v in obj.items()}
elif isinstance(_A , (list, tuple) ):
return np.array(_A )
elif is_tf_tensor(_A ):
return obj.numpy()
elif is_torch_tensor(_A ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(_A ):
return np.asarray(_A )
else:
return obj
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = fields(self )
# Safety and consistency checks
if not len(snake_case__ ):
raise ValueError(F'{self.__class__.__name__} has no fields.' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'{self.__class__.__name__} should not have more than one required field.' )
_lowerCAmelCase : List[str] = getattr(self , class_fields[0].name )
_lowerCAmelCase : str = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(snake_case__ ):
if isinstance(snake_case__ , snake_case__ ):
_lowerCAmelCase : Any = first_field.items()
_lowerCAmelCase : Any = True
else:
try:
_lowerCAmelCase : List[Any] = iter(snake_case__ )
_lowerCAmelCase : int = True
except TypeError:
_lowerCAmelCase : List[Any] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(snake_case__ ):
if (
not isinstance(snake_case__ , (list, tuple) )
or not len(snake_case__ ) == 2
or not isinstance(element[0] , snake_case__ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
_lowerCAmelCase : Optional[int] = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'Cannot set key/value for {element}. It needs to be a tuple (key, value).' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
_lowerCAmelCase : Dict = element[1]
elif first_field is not None:
_lowerCAmelCase : List[Any] = first_field
else:
for field in class_fields:
_lowerCAmelCase : str = getattr(self , field.name )
if v is not None:
_lowerCAmelCase : Any = v
def __delitem__( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
raise Exception(F'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.' )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
raise Exception(F'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.' )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
raise Exception(F'You cannot use ``pop`` on a {self.__class__.__name__} instance.' )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
raise Exception(F'You cannot use ``update`` on a {self.__class__.__name__} instance.' )
def __getitem__( self , snake_case__ ):
'''simple docstring'''
if isinstance(snake_case__ , snake_case__ ):
_lowerCAmelCase : Optional[Any] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(snake_case__ , snake_case__ )
super().__setattr__(snake_case__ , snake_case__ )
def __setitem__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__setitem__(snake_case__ , snake_case__ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(snake_case__ , snake_case__ )
def a ( self ):
'''simple docstring'''
return tuple(self[k] for k in self.keys() )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
@classmethod
def a ( cls , snake_case__ ):
'''simple docstring'''
raise ValueError(
F'{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}' )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "longest"
__magic_name__ = "max_length"
__magic_name__ = "do_not_pad"
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "pt"
__magic_name__ = "tf"
__magic_name__ = "np"
__magic_name__ = "jax"
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = context_managers
_lowerCAmelCase : str = ExitStack()
def __enter__( self ):
'''simple docstring'''
for context_manager in self.context_managers:
self.stack.enter_context(snake_case__ )
def __exit__( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
self.stack.__exit__(*snake_case__ , **snake_case__ )
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = infer_framework(_A )
if framework == "tf":
_lowerCAmelCase : Optional[int] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
_lowerCAmelCase : List[str] = inspect.signature(model_class.forward ) # PyTorch models
else:
_lowerCAmelCase : Union[str, Any] = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = model_class.__name__
_lowerCAmelCase : Dict = infer_framework(_A )
if framework == "tf":
_lowerCAmelCase : Union[str, Any] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
_lowerCAmelCase : Dict = inspect.signature(model_class.forward ) # PyTorch models
else:
_lowerCAmelCase : Any = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def lowercase (_A , _A = "" , _A = "." ):
"""simple docstring"""
def _flatten_dict(_A , _A="" , _A="." ):
for k, v in d.items():
_lowerCAmelCase : Dict = str(_A ) + delimiter + str(_A ) if parent_key else k
if v and isinstance(_A , _A ):
yield from flatten_dict(_A , _A , delimiter=_A ).items()
else:
yield key, v
return dict(_flatten_dict(_A , _A , _A ) )
@contextmanager
def lowercase (_A , _A = False ):
"""simple docstring"""
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def lowercase (_A , _A=None ):
"""simple docstring"""
if is_numpy_array(_A ):
return np.transpose(_A , axes=_A )
elif is_torch_tensor(_A ):
return array.T if axes is None else array.permute(*_A )
elif is_tf_tensor(_A ):
import tensorflow as tf
return tf.transpose(_A , perm=_A )
elif is_jax_tensor(_A ):
return jnp.transpose(_A , axes=_A )
else:
raise ValueError(f'Type not supported for transpose: {type(_A )}.' )
def lowercase (_A , _A ):
"""simple docstring"""
if is_numpy_array(_A ):
return np.reshape(_A , _A )
elif is_torch_tensor(_A ):
return array.reshape(*_A )
elif is_tf_tensor(_A ):
import tensorflow as tf
return tf.reshape(_A , _A )
elif is_jax_tensor(_A ):
return jnp.reshape(_A , _A )
else:
raise ValueError(f'Type not supported for reshape: {type(_A )}.' )
def lowercase (_A , _A=None ):
"""simple docstring"""
if is_numpy_array(_A ):
return np.squeeze(_A , axis=_A )
elif is_torch_tensor(_A ):
return array.squeeze() if axis is None else array.squeeze(dim=_A )
elif is_tf_tensor(_A ):
import tensorflow as tf
return tf.squeeze(_A , axis=_A )
elif is_jax_tensor(_A ):
return jnp.squeeze(_A , axis=_A )
else:
raise ValueError(f'Type not supported for squeeze: {type(_A )}.' )
def lowercase (_A , _A ):
"""simple docstring"""
if is_numpy_array(_A ):
return np.expand_dims(_A , _A )
elif is_torch_tensor(_A ):
return array.unsqueeze(dim=_A )
elif is_tf_tensor(_A ):
import tensorflow as tf
return tf.expand_dims(_A , axis=_A )
elif is_jax_tensor(_A ):
return jnp.expand_dims(_A , axis=_A )
else:
raise ValueError(f'Type not supported for expand_dims: {type(_A )}.' )
def lowercase (_A ):
"""simple docstring"""
if is_numpy_array(_A ):
return np.size(_A )
elif is_torch_tensor(_A ):
return array.numel()
elif is_tf_tensor(_A ):
import tensorflow as tf
return tf.size(_A )
elif is_jax_tensor(_A ):
return array.size
else:
raise ValueError(f'Type not supported for expand_dims: {type(_A )}.' )
def lowercase (_A , _A ):
"""simple docstring"""
for key, value in auto_map.items():
if isinstance(_A , (tuple, list) ):
_lowerCAmelCase : str = [f'{repo_id}--{v}' if (v is not None and '--' not in v) else v for v in value]
elif value is not None and "--" not in value:
_lowerCAmelCase : Dict = f'{repo_id}--{value}'
return auto_map
def lowercase (_A ):
"""simple docstring"""
for base_class in inspect.getmro(_A ):
_lowerCAmelCase : List[Any] = base_class.__module__
_lowerCAmelCase : str = base_class.__name__
if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('torch' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'Could not infer framework from class {model_class}.' )
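# Illustrative standalone version (added for clarity) of the flatten_dict helper
# above, which turns nested dicts into dotted keys:
def _flatten(d, parent="", sep="."):
    out = {}
    for k, v in d.items():
        key = f"{parent}{sep}{k}" if parent else str(k)
        if isinstance(v, dict) and v:
            out.update(_flatten(v, key, sep))
        else:
            out[key] = v
    return out
assert _flatten({"a": {"b": 1}, "c": 2}) == {"a.b": 1, "c": 2}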
| 353 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCAmelCase : Union[str, Any] = 25_00_04
lowerCAmelCase : int = 25_00_20
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = MBartaaTokenizer
__magic_name__ = MBartaaTokenizerFast
__magic_name__ = True
__magic_name__ = True
def a ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : List[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = '<s>'
_lowerCAmelCase : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(snake_case__ ) , 1054 )
def a ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ )
_lowerCAmelCase : Any = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def a ( self ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
_lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=True
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=False
_lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
_lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ = "facebook/mbart-large-50-one-to-many-mmt"
__magic_name__ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
__magic_name__ = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
__magic_name__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
@classmethod
def a ( cls ):
'''simple docstring'''
_lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
_lowerCAmelCase : Dict = 1
return cls
def a ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertIn(snake_case__ , self.tokenizer.all_special_ids )
_lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
_lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
_lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertNotIn(self.tokenizer.eos_token , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , snake_case__ )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0]
self.assertEqual(ids[0] , snake_case__ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(snake_case__ )
_lowerCAmelCase : Tuple = MBartaaTokenizer.from_pretrained(snake_case__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' )
_lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
_lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' )
_lowerCAmelCase : str = self.tokenizer(
text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' )
_lowerCAmelCase : List[Any] = targets['input_ids']
_lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(snake_case__ ) , {
# en_XX, A, test, EOS
'input_ids': [[25_0004, 62, 3034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_0001,
} , )
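# Illustrative sketch (added for clarity): shift_tokens_right, imported above,
# builds decoder inputs by rotating the final token to the front so the decoder
# always sees the previous target token. Simplified list version that ignores
# padding (the real function handles pad tokens):
def _shift_right(ids):
    return ids[-1:] + ids[:-1]
assert _shift_right([250020, 884, 2]) == [2, 250020, 884]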
| 25 | 0 |
'''simple docstring'''
def lowercase (_A ):
"""simple docstring"""
    if _A < 0:
        raise ValueError('Input value must be a positive integer' )
    elif isinstance(_A , float ):
        raise TypeError('Input value must be a \'int\' type' )
return bin(_A ).count('1' )
if __name__ == "__main__":
import doctest
doctest.testmod()
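# Worked example (added for illustration): 25 is 0b11001, so three set bits.
# Kernighan's trick is an equivalent alternative to counting '1' characters:
def _popcount(n):
    count = 0
    while n:
        n &= n - 1  # clears the lowest set bit
        count += 1
    return count
assert _popcount(25) == 3 == bin(25).count("1")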
| 354 |
'''simple docstring'''
from math import isqrt
def is_prime (_A ):
    """simple docstring"""
    return all(_A % divisor != 0 for divisor in range(2 , isqrt(_A ) + 1 ) )
def solution (_A = 1_0**6 ):
    """simple docstring"""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < _A :
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
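# Why the `+= 6 * cube_index` update works (added for illustration): each
# candidate is the gap between consecutive cubes,
# (n + 1)**3 - n**3 == 3*n*n + 3*n + 1, which walks the sequence 7, 19, 37, 61, ...
assert [(n + 1) ** 3 - n ** 3 for n in range(1, 5)] == [7, 19, 37, 61]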
| 25 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
lowerCAmelCase : Tuple = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
super().__init__(*snake_case__ , **snake_case__ )
requires_backends(self , 'vision' )
self.check_model_type(snake_case__ )
def __call__( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
return super().__call__(snake_case__ , **snake_case__ )
def a ( self , **snake_case__ ):
'''simple docstring'''
return {}, {}, {}
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = load_image(snake_case__ )
_lowerCAmelCase : Optional[int] = image.size
_lowerCAmelCase : Union[str, Any] = self.image_processor(images=snake_case__ , return_tensors=self.framework )
return model_inputs
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model(**snake_case__ )
return model_outputs
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = model_outputs.predicted_depth
_lowerCAmelCase : Union[str, Any] = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='bicubic' , align_corners=snake_case__ )
_lowerCAmelCase : Dict = prediction.squeeze().cpu().numpy()
_lowerCAmelCase : Union[str, Any] = (output * 255 / np.max(snake_case__ )).astype('uint8' )
_lowerCAmelCase : Union[str, Any] = Image.fromarray(snake_case__ )
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : Tuple = predicted_depth
_lowerCAmelCase : Union[str, Any] = depth
return output_dict
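# Illustrative sketch (added for clarity): the postprocess step above rescales
# the float depth map into a uint8 image. The normalisation, numpy-only:
import numpy as _np
def _depth_to_uint8(depth):
    return (depth * 255 / _np.max(depth)).astype("uint8")
assert _depth_to_uint8(_np.array([0.0, 0.5, 1.0])).tolist() == [0, 127, 255]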
| 355 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "mvp"
__magic_name__ = ["past_key_values"]
__magic_name__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , snake_case__=5_0267 , snake_case__=1024 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=0.0 , snake_case__=0.0 , snake_case__="gelu" , snake_case__=1024 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=0.0 , snake_case__=False , snake_case__=True , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__=True , snake_case__=2 , snake_case__=2 , snake_case__=False , snake_case__=100 , snake_case__=800 , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Any = max_position_embeddings
_lowerCAmelCase : Optional[Any] = d_model
_lowerCAmelCase : Optional[int] = encoder_ffn_dim
_lowerCAmelCase : Optional[int] = encoder_layers
_lowerCAmelCase : Any = encoder_attention_heads
_lowerCAmelCase : Any = decoder_ffn_dim
_lowerCAmelCase : Optional[Any] = decoder_layers
_lowerCAmelCase : int = decoder_attention_heads
_lowerCAmelCase : Union[str, Any] = dropout
_lowerCAmelCase : List[Any] = attention_dropout
_lowerCAmelCase : List[str] = activation_dropout
_lowerCAmelCase : Optional[Any] = activation_function
_lowerCAmelCase : Any = init_std
_lowerCAmelCase : Any = encoder_layerdrop
_lowerCAmelCase : Union[str, Any] = decoder_layerdrop
_lowerCAmelCase : Optional[int] = classifier_dropout
_lowerCAmelCase : List[Any] = use_cache
_lowerCAmelCase : Optional[int] = encoder_layers
_lowerCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCAmelCase : Optional[Any] = use_prompt
_lowerCAmelCase : Optional[Any] = prompt_length
_lowerCAmelCase : Any = prompt_mid_dim
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , snake_case__ ):
_lowerCAmelCase : Any = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'The config can simply be saved and uploaded again to be fixed.' )
| 25 | 0 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "mvp"
__magic_name__ = ["past_key_values"]
__magic_name__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , snake_case__=5_0267 , snake_case__=1024 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=0.0 , snake_case__=0.0 , snake_case__="gelu" , snake_case__=1024 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=0.0 , snake_case__=False , snake_case__=True , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__=True , snake_case__=2 , snake_case__=2 , snake_case__=False , snake_case__=100 , snake_case__=800 , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Any = max_position_embeddings
_lowerCAmelCase : Optional[Any] = d_model
_lowerCAmelCase : Optional[int] = encoder_ffn_dim
_lowerCAmelCase : Optional[int] = encoder_layers
_lowerCAmelCase : Any = encoder_attention_heads
_lowerCAmelCase : Any = decoder_ffn_dim
_lowerCAmelCase : Optional[Any] = decoder_layers
_lowerCAmelCase : int = decoder_attention_heads
_lowerCAmelCase : Union[str, Any] = dropout
_lowerCAmelCase : List[Any] = attention_dropout
_lowerCAmelCase : List[str] = activation_dropout
_lowerCAmelCase : Optional[Any] = activation_function
_lowerCAmelCase : Any = init_std
_lowerCAmelCase : Any = encoder_layerdrop
_lowerCAmelCase : Union[str, Any] = decoder_layerdrop
_lowerCAmelCase : Optional[int] = classifier_dropout
_lowerCAmelCase : List[Any] = use_cache
_lowerCAmelCase : Optional[int] = encoder_layers
_lowerCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCAmelCase : Optional[Any] = use_prompt
_lowerCAmelCase : Optional[Any] = prompt_length
_lowerCAmelCase : Any = prompt_mid_dim
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , snake_case__ ):
_lowerCAmelCase : Any = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'The config can simply be saved and uploaded again to be fixed.' )
| 356 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = {
"""7B""": 1_10_08,
"""13B""": 1_38_24,
"""30B""": 1_79_20,
"""65B""": 2_20_16,
"""70B""": 2_86_72,
}
lowerCAmelCase : Optional[int] = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
"""simple docstring"""
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def read_json(path):
    """simple docstring"""
    with open(path, 'r') as f:
        return json.load(f)
def write_json(text, path):
    """simple docstring"""
    with open(path, 'w') as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    """simple docstring"""
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, 'tmp')
    os.makedirs(tmp_model_path, exist_ok=True)
    params = read_json(os.path.join(input_base_path, 'params.json'))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params['n_layers']
    n_heads = params['n_heads']
    n_heads_per_shard = n_heads // num_shards
    dim = params['dim']
    dims_per_head = dim // n_heads
    base = 10_000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params['n_kv_heads']  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
_lowerCAmelCase : List[Any] = [
torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' )
for i in range(_A )
]
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Union[str, Any] = {'weight_map': {}}
for layer_i in range(_A ):
_lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
_lowerCAmelCase : List[str] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) )
_lowerCAmelCase : Optional[int] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , )
_lowerCAmelCase : Dict = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A )
_lowerCAmelCase : Dict = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : Tuple = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : int = inv_freq
for k, v in state_dict.items():
_lowerCAmelCase : Optional[Any] = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
_lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : List[str] = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
_lowerCAmelCase : List[str] = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ),
}
for k, v in state_dict.items():
_lowerCAmelCase : int = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
# Write configs
_lowerCAmelCase : Tuple = {'total_size': param_count * 2}
write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) )
_lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
_lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6
_lowerCAmelCase : List[Any] = LlamaConfig(
hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , )
config.save_pretrained(_A )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
_lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(_A , safe_serialization=_A )
shutil.rmtree(_A )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    """simple docstring"""
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.')
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
"""simple docstring"""
    parser = argparse.ArgumentParser()
parser.add_argument(
'--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
parser.add_argument(
'--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
parser.add_argument(
'--output_dir' , help='Location to write HF model and tokenizer' , )
parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' )
    args = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
    spm_path = os.path.join(args.input_dir, 'tokenizer.model')
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
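# Added sanity check of compute_intermediate_size above (pure arithmetic, no
# checkpoint needed; not part of the original conversion script): the SwiGLU
# hidden dim is int(8/3 * dim), rounded up to a multiple of `multiple_of`,
# which reproduces the sizes in the INTERMEDIATE_SIZE_MAP table at the top.
assert compute_intermediate_size(4096) == 11008  # 7B: dim=4096
assert compute_intermediate_size(8192) == 22016  # 65B: dim=8192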
| 25 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase : Any = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class ASTConfig(PretrainedConfig):
"""simple docstring"""
    model_type = "audio-spectrogram-transformer"
def __init__( self , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=16 , snake_case__=True , snake_case__=10 , snake_case__=10 , snake_case__=1024 , snake_case__=128 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = patch_size
_lowerCAmelCase : Dict = qkv_bias
_lowerCAmelCase : Any = frequency_stride
_lowerCAmelCase : List[Any] = time_stride
_lowerCAmelCase : Any = max_length
_lowerCAmelCase : Optional[Any] = num_mel_bins
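# Added sketch (assumption flagged: AST is assumed to slide a 16x16 patch over
# the (max_length, num_mel_bins) spectrogram with the strides configured above,
# as in transformers' ASTEmbeddings; this helper is hypothetical, not part of
# the original config module). It computes the resulting patch grid.
def _ast_patch_grid(num_mel_bins=128, max_length=1024, patch_size=16, frequency_stride=10, time_stride=10):
    f_dim = (num_mel_bins - patch_size) // frequency_stride + 1
    t_dim = (max_length - patch_size) // time_stride + 1
    return f_dim, t_dim, f_dim * t_dim

assert _ast_patch_grid() == (12, 101, 1212)  # 12 * 101 = 1212 patches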
| 357 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = True
__magic_name__ = None
__magic_name__ = 1
__magic_name__ = None
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
def a ( self ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(snake_case__ ) for k, v in self.__dict__.items()} )
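# Added illustration of the clone method above: re-invoking __init__ with
# deep-copied fields yields an independent object, so mutating the clone never
# leaks back into the original. Standalone analog with a hypothetical _CfgDemo
# (reuses the `copy` import at the top of this module):
from dataclasses import dataclass as _dataclass, field as _field

@_dataclass
class _CfgDemo:
    tags: list = _field(default_factory=list)

    def clone(self):
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

_orig = _CfgDemo(tags=["a"])
_clone = _orig.clone()
_clone.tags.append("b")
assert _orig.tags == ["a"]  # the original is unaffected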
| 25 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class ASTFeatureExtractionTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        '''simple docstring'''
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
"""simple docstring"""
    feature_extraction_class = ASTFeatureExtractor
    def setUp(self):
        '''simple docstring'''
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors='np').input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1E-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
@require_torch
    def test_double_precision_pad(self):
        '''simple docstring'''
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='np')
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt')
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        '''simple docstring'''
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
@require_torch
    def test_integration(self):
        '''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors='pt').input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1E-4))
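# Added sketch (assumption flagged: the extractor is assumed to pad/truncate
# the log-mel spectrogram along the time axis to max_length frames, which is
# what the fixed (1, 1024, 128) shape asserted above suggests). Pure-numpy
# analog of that fixed-length behaviour, not the real feature extractor:
def _pad_or_truncate(spec, max_length=1024):
    n_frames = spec.shape[0]
    if n_frames < max_length:
        return np.pad(spec, ((0, max_length - n_frames), (0, 0)))
    return spec[:max_length]

assert _pad_or_truncate(np.zeros((800, 128))).shape == (1024, 128)
assert _pad_or_truncate(np.zeros((2000, 128))).shape == (1024, 128)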
| 358 |
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCAmelCase : List[str] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 25 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang='en_XX', tgt_lang='ro_RO', keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = '<s>'
_lowerCAmelCase : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(snake_case__ ) , 1054 )
def a ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ )
_lowerCAmelCase : Any = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def a ( self ):
        '''simple docstring'''
        # fmt: off
_lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def a ( self ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
_lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=True
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=False
_lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
_lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
    tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
    @classmethod
    def setUpClass(cls):
        '''simple docstring'''
        cls.tokenizer: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='en_XX', tgt_lang='ro_RO')
        cls.pad_token_id = 1
        return cls
def a ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertIn(snake_case__ , self.tokenizer.all_special_ids )
_lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
_lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
_lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertNotIn(self.tokenizer.eos_token , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , snake_case__ )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0]
self.assertEqual(ids[0] , snake_case__ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(snake_case__ )
_lowerCAmelCase : Tuple = MBartaaTokenizer.from_pretrained(snake_case__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' )
_lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
_lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' )
_lowerCAmelCase : str = self.tokenizer(
text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' )
_lowerCAmelCase : List[Any] = targets['input_ids']
_lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(snake_case__ ) , {
# en_XX, A, test, EOS
'input_ids': [[25_0004, 62, 3034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_0001,
} , )
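# Added sketch of the shift_tokens_right behaviour the assertions above rely
# on (a list-based analog, not the transformers implementation): the last
# non-pad token -- the EOS, id 2 -- wraps around to position 0 and everything
# else shifts one slot right, so labels [RO_CODE, ..., EOS] become decoder
# inputs [EOS, RO_CODE, ...]. mBART's pad id is 1.
def _shift_tokens_right(ids, pad_token_id=1):
    last = max(i for i, tok in enumerate(ids) if tok != pad_token_id)
    return [ids[last]] + ids[:last] + ids[last + 1:]

assert _shift_tokens_right([250020, 884, 9019, 2]) == [2, 250020, 884, 9019]
assert _shift_tokens_right([250020, 884, 2, 1, 1]) == [2, 250020, 884, 1, 1]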
| 359 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 25 | 0 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """simple docstring"""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """simple docstring"""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """simple docstring"""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['mean_iou'] = np.nanmean(iou)
    metrics['mean_accuracy'] = np.nanmean(acc)
    metrics['overall_accuracy'] = all_acc
    metrics['per_category_iou'] = iou
    metrics['per_category_accuracy'] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False, ):
        '''simple docstring'''
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels, )
return iou_result
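# Added worked example (pure numpy, illustrative only) of what the functions
# above compute: per-class IoU on a tiny 2x2 map with two classes. Class 0
# overlaps on 2 pixels out of a 3-pixel union (2/3); class 1 on 1 of 2 (1/2).
_pred = np.array([[0, 0], [1, 0]])
_gt = np.array([[0, 0], [1, 1]])
_per_class_iou = []
for _c in (0, 1):
    _inter = np.logical_and(_pred == _c, _gt == _c).sum()
    _union = np.logical_or(_pred == _c, _gt == _c).sum()
    _per_class_iou.append(_inter / _union)
assert np.allclose(_per_class_iou, [2 / 3, 1 / 2])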
| 360 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
"""simple docstring"""
    model_type = "nat"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
def __init__( self , snake_case__=4 , snake_case__=3 , snake_case__=64 , snake_case__=[3, 4, 6, 5] , snake_case__=[2, 4, 8, 16] , snake_case__=7 , snake_case__=3.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=None , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = patch_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : Any = depths
_lowerCAmelCase : Dict = len(snake_case__ )
_lowerCAmelCase : str = num_heads
_lowerCAmelCase : Dict = kernel_size
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : int = qkv_bias
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Dict = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
_lowerCAmelCase : Any = layer_scale_init_value
_lowerCAmelCase : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )]
_lowerCAmelCase , _lowerCAmelCase : str = get_aligned_output_features_output_indices(
out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
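# Added check of the hidden_size rule above (illustrative, not part of the
# original config): Nat doubles the channel count at every stage, so
# embed_dim=64 with four stages ends at 64 * 2**3 = 512, the value stored in
# hidden_size for VisionEncoderDecoderModel compatibility.
_embed_dim, _num_stages = 64, 4
_stage_widths = [_embed_dim * 2**i for i in range(_num_stages)]
assert _stage_widths == [64, 128, 256, 512]
assert int(_embed_dim * 2 ** (_num_stages - 1)) == 512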
| 25 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config(swinva_name):
    """simple docstring"""
    config = SwinvaConfig()
    name_split = swinva_name.split('_')
    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinva_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 21841
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-22k-id2label.json'
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    else:
        num_classes = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    """simple docstring"""
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.norm')
    if "layers" in name:
        name = 'encoder.' + name
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "q_bias" in name:
        name = name.replace('q_bias', 'query.bias')
    if "k_bias" in name:
        name = name.replace('k_bias', 'key.bias')
    if "v_bias" in name:
        name = name.replace('v_bias', 'value.bias')
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp', 'continuous_position_bias_mlp')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head', 'classifier')
    else:
        name = 'swinv2.' + name
    return name
def convert_state_dict(orig_state_dict, model):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f'swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'] = val[:dim, :]
                orig_state_dict[f'swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'] = val[:dim]
                orig_state_dict[f'swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swinva_checkpoint(swinva_name, pytorch_dump_folder_path):
    """simple docstring"""
    timm_model = timm.create_model(swinva_name, pretrained=True)
    timm_model.eval()
    config = get_swinva_config(swinva_name)
    model = SwinvaForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_', '-')))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='pt')
    timm_outs = timm_model(inputs['pixel_values'])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1E-3)
    print(f'Saving model {swinva_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinva_name), organization='nandwalritik', commit_message='Add model', )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
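# Added sketch of the qkv split performed in convert_state_dict above
# (illustrative, not part of the original script): timm fuses query/key/value
# into one (3*dim, dim) projection; the HF checkpoint stores them separately,
# taken as consecutive row thirds.
_dim = 4
_qkv = torch.arange(3 * _dim * _dim, dtype=torch.float32).reshape(3 * _dim, _dim)
_q, _k, _v = _qkv[:_dim], _qkv[_dim : 2 * _dim], _qkv[-_dim:]
assert torch.equal(torch.cat([_q, _k, _v], dim=0), _qkv)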
| 361 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""roberta-base""": 5_12,
"""roberta-large""": 5_12,
"""roberta-large-mnli""": 5_12,
"""distilroberta-base""": 5_12,
"""roberta-base-openai-detector""": 5_12,
"""roberta-large-openai-detector""": 5_12,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) )
_lowerCAmelCase : List[Any] = add_prefix_space
_lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = add_prefix_space
_lowerCAmelCase : Union[str, Any] = 'post_processor'
_lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
_lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCAmelCase : Any = tuple(state['sep'] )
if "cls" in state:
_lowerCAmelCase : str = tuple(state['cls'] )
_lowerCAmelCase : List[str] = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : int = add_prefix_space
_lowerCAmelCase : Tuple = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
_lowerCAmelCase : Union[str, Any] = trim_offsets
_lowerCAmelCase : Optional[int] = True
if changes_to_apply:
_lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) )
_lowerCAmelCase : Optional[int] = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
    @property
    def mask_token(self):
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        '''simple docstring'''
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case__ , **snake_case__ )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def a ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 25 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = ["torch", "torchsde"]
def __init__( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
requires_backends(self , ['torch', 'torchsde'] )
@classmethod
def a ( cls , *snake_case__ , **snake_case__ ):
'''simple docstring'''
requires_backends(cls , ['torch', 'torchsde'] )
@classmethod
def a ( cls , *snake_case__ , **snake_case__ ):
'''simple docstring'''
requires_backends(cls , ['torch', 'torchsde'] )
| 362 |
'''simple docstring'''
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence ):
    """simple docstring"""
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'The elements inside the sequence must contain only {colors} values'
            raise ValueError(msg )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by commas:\n""").strip()
    unsorted = [int(item.strip()) for item in user_input.split(""",""")]
print(F'''{dutch_national_flag_sort(unsorted)}''')
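A quick sanity check for the three-way partition above, as a minimal sketch (it assumes the `dutch_national_flag_sort` function and `colors` tuple defined in this snippet): the result must be sorted and must be a permutation of the input.

import random
from collections import Counter

sample = [random.choice(colors) for _ in range(20)]
result = dutch_national_flag_sort(list(sample))
assert result == sorted(sample)            # all reds, then whites, then blues
assert Counter(result) == Counter(sample)  # nothing lost or duplicated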
| 25 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = KandinskyVaaInpaintPipeline
__magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
__magic_name__ = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
__magic_name__ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__magic_name__ = False
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a ( self ):
'''simple docstring'''
return 100
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = {
'in_channels': 9,
            # Out channels is double the in channels because the model predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ )
return model
@property
def a ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a ( self ):
'''simple docstring'''
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , )
_lowerCAmelCase : Any = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a ( self , snake_case__ , snake_case__=0 ):
'''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            snake_case__ )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uinta(image ) ).convert('RGB' ).resize((256, 256) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.floataa )
        mask[:32, :32] = 0
        if str(snake_case__ ).startswith('mps' ):
            generator = torch.manual_seed(snake_case__ )
        else:
            generator = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_lowerCAmelCase : Optional[int] = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = 'cpu'
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) )
_lowerCAmelCase : int = output.images
_lowerCAmelCase : int = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : List[str] = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def a ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
_lowerCAmelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa )
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[str] = 'a hat'
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa )
        pipeline = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
        _lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowerCAmelCase : Optional[Any] = pipeline(
image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
_lowerCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 363 |
'''simple docstring'''
def solution():
    """simple docstring"""
    days_per_month = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
    day = 6
    month = 1
    year = 1_9_0_1
    sundays = 0
    while year < 2_0_0_1:
        day += 7
        if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 2_9 and month == 2:
                month += 1
                day = day - 2_9
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 1_2:
            year += 1
            month = 1
        if year < 2_0_0_1 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
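As a cross-check, the same count can be computed directly with the standard library; this is a minimal sketch relying on `datetime.date.weekday()` returning 6 for Sunday.

from datetime import date

sundays = sum(
    1
    for year in range(1901, 2001)
    for month in range(1, 13)
    if date(year, month, 1).weekday() == 6
)
print(sundays)  # should agree with solution()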
| 25 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["""AlbertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["""AlbertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
"""ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AlbertForMaskedLM""",
"""AlbertForMultipleChoice""",
"""AlbertForPreTraining""",
"""AlbertForQuestionAnswering""",
"""AlbertForSequenceClassification""",
"""AlbertForTokenClassification""",
"""AlbertModel""",
"""AlbertPreTrainedModel""",
"""load_tf_weights_in_albert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
"""TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAlbertForMaskedLM""",
"""TFAlbertForMultipleChoice""",
"""TFAlbertForPreTraining""",
"""TFAlbertForQuestionAnswering""",
"""TFAlbertForSequenceClassification""",
"""TFAlbertForTokenClassification""",
"""TFAlbertMainLayer""",
"""TFAlbertModel""",
"""TFAlbertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
"""FlaxAlbertForMaskedLM""",
"""FlaxAlbertForMultipleChoice""",
"""FlaxAlbertForPreTraining""",
"""FlaxAlbertForQuestionAnswering""",
"""FlaxAlbertForSequenceClassification""",
"""FlaxAlbertForTokenClassification""",
"""FlaxAlbertModel""",
"""FlaxAlbertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 364 |
'''simple docstring'''
def solution(limit = 1_0_0_0_0_0_0 ):
    """simple docstring"""
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
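For small limits the sieve can be checked against the definition of Euler's totient. This is a brute-force sketch; note that `solution` accumulates `phi` in floats and truncates the final sum, so tiny rounding differences are possible.

from math import gcd

def phi_naive(n):
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)

limit = 100
expected = sum(phi_naive(n) for n in range(2, limit + 1))
print(expected, solution(limit))  # the two counts should agree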
| 25 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 365 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = """src/transformers"""
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"""\[([^\]]+)\]""")
def get_indent(line ):
    """simple docstring"""
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code , indent_level="" , start_prompt=None , end_prompt=None ):
    """simple docstring"""
    index = 0
    lines = code.split('\n' )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ['\n'.join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
                current_block.append(lines[index] )
                blocks.append('\n'.join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('\n'.join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append('\n'.join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append('\n'.join(lines[index:] ) )
    return blocks
def ignore_underscore(key ):
    """simple docstring"""
    def _inner(x ):
        return key(x ).lower().replace('_' , '' )
    return _inner
def sort_objects(objects , key=None ):
    """simple docstring"""
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
def sort_objects_in_import(import_statement ):
    """simple docstring"""
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split('\n' )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace , import_statement )
        return import_statement
def sort_imports(file , check_only=True ):
    """simple docstring"""
    with open(file , encoding='utf-8' ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n' )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.' )
            with open(file , 'w' , encoding='utf-8' ) as f:
                f.write('\n'.join(main_blocks ) )
def sort_imports_in_all_inits(check_only=True ):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , '__init__.py' ) , check_only=check_only )
            if result:
                failures += [os.path.join(root , '__init__.py' )]
    if len(failures ) > 0:
        raise ValueError(f'Would overwrite {len(failures )} files, run `make style`.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
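A small illustration of what `sort_objects_in_import` does to one multi-line entry (a sketch; the input mimics the `_import_structure` values this script targets, and constants sort first, then classes, then functions):

example = (
    '    "models.albert": [\n'
    '        "AlbertModel",\n'
    '        "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",\n'
    '        "load_tf_weights_in_albert",\n'
    '    ]'
)
print(sort_objects_in_import(example))
# Expected ordering inside the brackets:
#     "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
#     "AlbertModel",
#     "load_tf_weights_in_albert",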
| 25 | 0 |
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
model_id = """path-to-your-trained-model"""
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("""cuda""")
prompt = """A photo of sks dog in a bucket"""
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 366 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = KandinskyVaaInpaintPipeline
__magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
__magic_name__ = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
__magic_name__ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__magic_name__ = False
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a ( self ):
'''simple docstring'''
return 100
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = {
'in_channels': 9,
            # Out channels is double the in channels because the model predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ )
return model
@property
def a ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a ( self ):
'''simple docstring'''
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , )
_lowerCAmelCase : Any = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a ( self , snake_case__ , snake_case__=0 ):
'''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            snake_case__ )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uinta(image ) ).convert('RGB' ).resize((256, 256) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.floataa )
        mask[:32, :32] = 0
        if str(snake_case__ ).startswith('mps' ):
            generator = torch.manual_seed(snake_case__ )
        else:
            generator = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_lowerCAmelCase : Optional[int] = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = 'cpu'
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) )
_lowerCAmelCase : int = output.images
_lowerCAmelCase : int = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : List[str] = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def a ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
_lowerCAmelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa )
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[str] = 'a hat'
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa )
        pipeline = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowerCAmelCase : Optional[Any] = pipeline(
image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
_lowerCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 25 | 0 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account.
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def a ( self , snake_case__ , snake_case__ , snake_case__=4 , snake_case__=False ):
'''simple docstring'''
_lowerCAmelCase : Any = compute_bleu(
reference_corpus=snake_case__ , translation_corpus=snake_case__ , max_order=snake_case__ , smooth=snake_case__ )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
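The two ingredients described above can be computed by hand for a single sentence pair; this is a minimal sketch of clipped unigram precision and the brevity penalty, not the full geometric-mean BLEU score.

from collections import Counter
from math import exp

candidate = ['the', 'the', 'cat']
reference = ['the', 'cat', 'sat']

# Clipped precision: each candidate token counts at most as often as it
# appears in the reference.
overlap = Counter(candidate) & Counter(reference)
precision = sum(overlap.values()) / len(candidate)  # 2/3

# Brevity penalty: 1.0 when the candidate is at least as long as the
# reference, exp(1 - r/c) when it is shorter.
c, r = len(candidate), len(reference)
bp = 1.0 if c >= r else exp(1 - r / c)
print(precision, bp)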
| 367 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation ):
    """simple docstring"""
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b , a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
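A worked example of the evaluator above: `5 1 2 + 4 * +` is the postfix form of `5 + (1 + 2) * 4`, so the stack collapses to [5, 3], then [5, 12], then [17].

tokens = '5 1 2 + 4 * +'.split()
print(evaluate_postfix(tokens))  # 17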
| 25 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "yolos"
def __init__( self , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=[512, 864] , snake_case__=16 , snake_case__=3 , snake_case__=True , snake_case__=100 , snake_case__=True , snake_case__=False , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=5 , snake_case__=2 , snake_case__=0.1 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : Tuple = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : List[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Optional[Any] = layer_norm_eps
_lowerCAmelCase : int = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : Union[str, Any] = qkv_bias
_lowerCAmelCase : Union[str, Any] = num_detection_tokens
_lowerCAmelCase : List[str] = use_mid_position_embeddings
_lowerCAmelCase : Dict = auxiliary_loss
# Hungarian matcher
_lowerCAmelCase : int = class_cost
_lowerCAmelCase : List[str] = bbox_cost
_lowerCAmelCase : List[Any] = giou_cost
# Loss coefficients
_lowerCAmelCase : Union[str, Any] = bbox_loss_coefficient
_lowerCAmelCase : Union[str, Any] = giou_loss_coefficient
_lowerCAmelCase : Optional[int] = eos_coefficient
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = version.parse("1.11" )
@property
def a ( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def a ( self ):
'''simple docstring'''
return 1E-4
@property
def a ( self ):
'''simple docstring'''
return 12
| 368 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "mobilenet_v2"
def __init__( self , snake_case__=3 , snake_case__=224 , snake_case__=1.0 , snake_case__=8 , snake_case__=8 , snake_case__=6 , snake_case__=32 , snake_case__=True , snake_case__=True , snake_case__="relu6" , snake_case__=True , snake_case__=0.8 , snake_case__=0.02 , snake_case__=0.001 , snake_case__=255 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Union[str, Any] = image_size
_lowerCAmelCase : List[Any] = depth_multiplier
_lowerCAmelCase : List[Any] = depth_divisible_by
_lowerCAmelCase : Optional[Any] = min_depth
_lowerCAmelCase : str = expand_ratio
_lowerCAmelCase : str = output_stride
_lowerCAmelCase : Any = first_layer_is_expansion
_lowerCAmelCase : int = finegrained_output
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : List[str] = tf_padding
_lowerCAmelCase : Optional[int] = classifier_dropout_prob
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : str = semantic_loss_ignore_index
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = version.parse("1.11" )
@property
def a ( self ):
'''simple docstring'''
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def a ( self ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def a ( self ):
'''simple docstring'''
return 1E-4
| 25 | 0 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "linear"
__magic_name__ = "cosine"
__magic_name__ = "cosine_with_restarts"
__magic_name__ = "polynomial"
__magic_name__ = "constant"
__magic_name__ = "constant_with_warmup"
__magic_name__ = "piecewise_constant"
def get_constant_schedule(optimizer , last_epoch = -1 ):
    """simple docstring"""
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup(optimizer , num_warmup_steps , last_epoch = -1 ):
    """simple docstring"""
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule(optimizer , step_rules , last_epoch = -1 ):
    """simple docstring"""
    rules_dict = {}
    rule_list = step_rules.split(',' )
    for rule_str in rule_list[:-1]:
        value_str , value = rule_str.split(':' )
        steps = int(value_str )
        value = float(value )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def get_linear_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    """simple docstring"""
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , num_cycles = 0.5 , last_epoch = -1 ):
    """simple docstring"""
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , num_cycles = 1 , last_epoch = -1 ):
    """simple docstring"""
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , lr_end=1E-7 , power=1.0 , last_epoch=-1 ):
    """simple docstring"""
    lr_init = optimizer.defaults['lr']
    if not (lr_init > lr_end):
        raise ValueError(f'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})' )
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer , lr_lambda , last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name , optimizer , step_rules = None , num_warmup_steps = None , num_training_steps = None , num_cycles = 1 , power = 1.0 , last_epoch = -1 , ):
    """simple docstring"""
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
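A usage sketch tying the pieces together (the model and optimizer are placeholders): the piecewise rule string follows the format parsed above, comma-separated `step:multiplier` pairs plus a trailing default multiplier.

import torch

model = torch.nn.Linear(4, 2)  # placeholder module
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

# Multiplier 1.0 for steps < 100, 0.1 for steps < 1000, then 0.01.
scheduler = get_scheduler('piecewise_constant', optimizer, step_rules='100:1.0,1000:0.1,0.01')

for _ in range(5):
    optimizer.step()
    scheduler.step()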
| 369 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase : Optional[int] = 'pt'
_lowerCAmelCase : Tuple = 'tf'
def a ( self , snake_case__ ):
'''simple docstring'''
        model_pt = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
model_tf.save_pretrained(snake_case__ )
def a ( self ):
'''simple docstring'''
        mock_framework = 'mock_framework'
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
    def a ( self ):
        '''simple docstring'''
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )
def a ( self ):
'''simple docstring'''
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_torch_available' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ), patch(
            'transformers.onnx.features.is_torch_available' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ), patch(
            'transformers.onnx.features.is_torch_available' , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model )
| 25 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "roberta"
def __init__( self , snake_case__=5_0265 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
_lowerCAmelCase : List[str] = vocab_size
_lowerCAmelCase : Optional[int] = hidden_size
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = max_position_embeddings
_lowerCAmelCase : str = type_vocab_size
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : str = layer_norm_eps
_lowerCAmelCase : Optional[int] = position_embedding_type
_lowerCAmelCase : Optional[Any] = use_cache
_lowerCAmelCase : Dict = classifier_dropout
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
@property
def a ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
_lowerCAmelCase : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 370 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Any = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
# fmt: on
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = NllbTokenizer
__magic_name__ = []
__magic_name__ = []
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
_lowerCAmelCase : Dict = legacy_behaviour
super().__init__(
vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : int = False if not self.vocab_file else True
_lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_lowerCAmelCase : Any = {
lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn'
_lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang )
_lowerCAmelCase : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowerCAmelCase : Optional[Any] = src_lang
_lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
_lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ )
_lowerCAmelCase : Optional[Any] = tgt_lang_id
return inputs
def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = src_lang
_lowerCAmelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ )
def a ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def a ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : Dict = []
_lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : int = [self.eos_token_id]
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : int = []
_lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : List[str] = [self.eos_token_id]
_lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_lowerCAmelCase : Union[str, Any] = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
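# Minimal usage sketch (not part of the original file; it assumes the public
# checkpoint "facebook/nllb-200-distilled-600M" referenced in the URL maps
# above and the standard `transformers` API):
#
#   from transformers import NllbTokenizerFast
#
#   tok = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   batch = tok("Hello world", return_tensors="pt")
#
# The source language code is prepended to the input ids and </s> is appended
# (the order flips to suffix placement when legacy_behaviour is enabled).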
| 25 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = LEDTokenizer
__magic_name__ = LEDTokenizerFast
__magic_name__ = True
def a ( self ):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : Optional[int] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_lowerCAmelCase : List[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
_lowerCAmelCase : Optional[int] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_lowerCAmelCase : str = {'unk_token': '<unk>'}
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def a ( self , **snake_case__ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case__ )
def a ( self , **snake_case__ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def a ( self ):
'''simple docstring'''
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def a ( self ):
'''simple docstring'''
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowerCAmelCase : Optional[Any] = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : Dict = tokenizer(snake_case__ , max_length=len(snake_case__ ) , padding=snake_case__ , return_tensors='pt' )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_lowerCAmelCase : str = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case__ , snake_case__ )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : Dict = tokenizer(snake_case__ , padding=snake_case__ , return_tensors='pt' )
self.assertIn('input_ids' , snake_case__ )
self.assertIn('attention_mask' , snake_case__ )
self.assertNotIn('labels' , snake_case__ )
self.assertNotIn('decoder_attention_mask' , snake_case__ )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : int = tokenizer(text_target=snake_case__ , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def a ( self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : List[Any] = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ['A long paragraph for summarization.']
_lowerCAmelCase : str = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : int = tokenizer(snake_case__ , return_tensors='pt' )
_lowerCAmelCase : List[Any] = tokenizer(text_target=snake_case__ , return_tensors='pt' )
_lowerCAmelCase : str = inputs['input_ids']
_lowerCAmelCase : int = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def a ( self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : int = ['Summary of the text.', 'Another summary.']
_lowerCAmelCase : Dict = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_lowerCAmelCase : List[Any] = tokenizer(snake_case__ , padding=snake_case__ )
_lowerCAmelCase : Tuple = [[0] * len(snake_case__ ) for x in encoded_output['input_ids']]
_lowerCAmelCase : Optional[Any] = tokenizer.pad(snake_case__ )
self.assertSequenceEqual(outputs['global_attention_mask'] , snake_case__ )
def a ( self ):
'''simple docstring'''
pass
def a ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : List[str] = 'A, <mask> AllenNLP sentence.'
_lowerCAmelCase : Union[str, Any] = tokenizer_r.encode_plus(snake_case__ , add_special_tokens=snake_case__ , return_token_type_ids=snake_case__ )
_lowerCAmelCase : Dict = tokenizer_p.encode_plus(snake_case__ , add_special_tokens=snake_case__ , return_token_type_ids=snake_case__ )
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_lowerCAmelCase : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_lowerCAmelCase : str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
snake_case__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
snake_case__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 371 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCAmelCase : List[str] = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def lowercase (_A ):
"""simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
lowerCAmelCase : Dict = parser.parse_args()
if args.check_lib:
lowerCAmelCase : Union[str, Any] = importlib.import_module("""transformers""")
lowerCAmelCase : int = Path(transformers_module.__file__).parent
else:
lowerCAmelCase : int = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
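# Usage sketch (the script path is an assumption; this utility is laid out as
# utils/check_build.py in the transformers repository):
#
#   python utils/check_build.py               # checks build/lib/transformers
#   python utils/check_build.py --check_lib   # checks the installed package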
| 25 | 0 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case__=None , snake_case__=None ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = list(poly_a or [0] )[:]
_lowerCAmelCase : Union[str, Any] = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
_lowerCAmelCase : int = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
_lowerCAmelCase : Dict = len(self.polyB )
# Add 0 to make lengths equal a power of 2
        _lowerCAmelCase : Dict = int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
        # A complex root of unity used for the Fourier transform
_lowerCAmelCase : str = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
_lowerCAmelCase : Union[str, Any] = self.__multiply()
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
        if len(dft ) <= 1:
return dft[0]
#
_lowerCAmelCase : Union[str, Any] = self.c_max_length // 2
while next_ncol > 0:
            _lowerCAmelCase : List[str] = [[] for i in range(next_ncol )]
_lowerCAmelCase : str = self.root**next_ncol
# First half of next step
_lowerCAmelCase : str = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(snake_case__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
_lowerCAmelCase : List[str] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(snake_case__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
_lowerCAmelCase : Optional[int] = new_dft
_lowerCAmelCase : Dict = next_ncol // 2
return dft[0]
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.__dft('A' )
_lowerCAmelCase : int = self.__dft('B' )
_lowerCAmelCase : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
_lowerCAmelCase : List[Any] = 2
while next_ncol <= self.c_max_length:
            _lowerCAmelCase : Optional[int] = [[] for i in range(next_ncol )]
_lowerCAmelCase : List[Any] = self.root ** (next_ncol // 2)
_lowerCAmelCase : Tuple = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
_lowerCAmelCase : Tuple = new_inverse_c
next_ncol *= 2
# Unpack
_lowerCAmelCase : Dict = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self ):
'''simple docstring'''
        # enumerate yields (index, value), so the index i is the exponent of x
        _lowerCAmelCase : Tuple = 'A = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        _lowerCAmelCase : Optional[int] = 'B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        _lowerCAmelCase : int = 'A*B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.product ) )
return F'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
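    # Cross-check against a naive O(n^2) convolution (a sketch, independent of
    # the FFT class above): (1 + 2x + 3x^2) * (4 + 5x + 6x^2)
    # = 4 + 13x + 28x^2 + 27x^3 + 18x^4.
    def naive_poly_multiply(poly_a, poly_b):
        # product[k] accumulates every a_i * b_j with i + j == k
        product = [0] * (len(poly_a) + len(poly_b) - 1)
        for i, a in enumerate(poly_a):
            for j, b in enumerate(poly_b):
                product[i + j] += a * b
        return product

    assert naive_poly_multiply([1, 2, 3], [4, 5, 6]) == [4, 13, 28, 27, 18]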
| 350 |
'''simple docstring'''
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
_lowerCAmelCase : List[str] = ''
_lowerCAmelCase : Any = ''
    # append each character followed by "|" to new_input_string for range(0, length - 1)
for i in input_string[: len(_A ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = 0, 0
# length[i] shows the length of palindromic substring with center i
_lowerCAmelCase : List[str] = [1 for i in range(len(_A ) )]
# for each character in new_string find corresponding palindromic string
_lowerCAmelCase : Any = 0
for j in range(len(_A ) ):
_lowerCAmelCase : Optional[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(_A )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_lowerCAmelCase : List[str] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update l and r to cover this palindrome
if j + k - 1 > r:
_lowerCAmelCase : Optional[Any] = j - k + 1 # noqa: E741
_lowerCAmelCase : int = j + k - 1
# update max_length and start position
if max_length < length[j]:
_lowerCAmelCase : Dict = length[j]
_lowerCAmelCase : Optional[int] = j
# create that string
_lowerCAmelCase : List[str] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
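    # Brute-force cross-check (a sketch): an O(n^3) scan must agree with the
    # O(n) Manacher result on small inputs; "abacab" contains the length-5
    # palindrome "bacab".
    def brute_force_longest_palindrome(text: str) -> str:
        best = ""
        for i in range(len(text)):
            for j in range(i, len(text)):
                candidate = text[i : j + 1]
                if candidate == candidate[::-1] and len(candidate) > len(best):
                    best = candidate
        return best

    assert brute_force_longest_palindrome("abacab") == "bacab"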
| 25 | 0 |
'''simple docstring'''
def lowercase (_A ):
"""simple docstring"""
if len(_A ) < 2:
return collection
    def circle_sort_util(collection , low , high ) -> bool:
_lowerCAmelCase : Optional[Any] = False
if low == high:
return swapped
_lowerCAmelCase : Optional[int] = low
_lowerCAmelCase : Dict = high
while left < right:
if collection[left] > collection[right]:
_lowerCAmelCase : Optional[int] = (
collection[right],
collection[left],
)
_lowerCAmelCase : Tuple = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
_lowerCAmelCase : Dict = (
collection[right + 1],
collection[left],
)
_lowerCAmelCase : int = True
_lowerCAmelCase : List[Any] = low + int((high - low) / 2 )
        _lowerCAmelCase : Any = circle_sort_util(collection , low , mid )
        _lowerCAmelCase : Optional[Any] = circle_sort_util(collection , mid + 1 , high )
return swapped or left_swap or right_swap
_lowerCAmelCase : Any = True
while is_not_sorted is True:
        _lowerCAmelCase : Dict = circle_sort_util(collection , 0 , len(collection ) - 1 )
return collection
if __name__ == "__main__":
lowerCAmelCase : List[str] = input("""Enter numbers separated by a comma:\n""").strip()
lowerCAmelCase : Union[str, Any] = [int(item) for item in user_input.split(""",""")]
print(circle_sort(unsorted))
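    # How a pass works: circle_sort_util compares elements mirrored around the
    # centre of [low, high], swaps out-of-order pairs, then recurses on both
    # halves. On [3, 1, 2] the top-level comparison swaps 3 and 2 to give
    # [2, 1, 3] and the left-half recursion yields [1, 2, 3]; the next pass
    # performs no swap, so the outer while-loop terminates.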
| 351 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = 0
__magic_name__ = False
__magic_name__ = 3.0
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_lowerCAmelCase : str = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , snake_case__ )
@require_multi_gpu
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case__ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler])
lowerCAmelCase : Optional[Any] = torch.nn.Linear(1_00, 2_00)
lowerCAmelCase : List[str] = accelerator.prepare(model)
# Check the values changed in kwargs
lowerCAmelCase : List[Any] = """"""
lowerCAmelCase : Tuple = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 25 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "mctct"
def __init__( self , snake_case__=8065 , snake_case__=1536 , snake_case__=36 , snake_case__=6144 , snake_case__=4 , snake_case__=384 , snake_case__=920 , snake_case__=1E-5 , snake_case__=0.3 , snake_case__="relu" , snake_case__=0.02 , snake_case__=0.3 , snake_case__=0.3 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__=1 , snake_case__=0.3 , snake_case__=1 , snake_case__=(7,) , snake_case__=(3,) , snake_case__=80 , snake_case__=1 , snake_case__=None , snake_case__="sum" , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ )
_lowerCAmelCase : Dict = vocab_size
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : Dict = attention_head_dim
_lowerCAmelCase : Any = max_position_embeddings
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : Optional[Any] = layerdrop
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : Optional[int] = pad_token_id
_lowerCAmelCase : Optional[Any] = bos_token_id
_lowerCAmelCase : Dict = eos_token_id
_lowerCAmelCase : Optional[int] = conv_glu_dim
_lowerCAmelCase : List[str] = conv_dropout
_lowerCAmelCase : Tuple = num_conv_layers
_lowerCAmelCase : Optional[Any] = input_feat_per_channel
_lowerCAmelCase : Union[str, Any] = input_channels
_lowerCAmelCase : Optional[int] = conv_channels
_lowerCAmelCase : Optional[int] = ctc_loss_reduction
_lowerCAmelCase : Any = ctc_zero_infinity
        # prevents config testing from failing when exporting to JSON
_lowerCAmelCase : Union[str, Any] = list(snake_case__ )
_lowerCAmelCase : Dict = list(snake_case__ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '
F'but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '
F'`config.num_conv_layers = {self.num_conv_layers}`.' )
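# Instantiation sketch (assuming the class is exported as MCTCTConfig, as in
# transformers): the defaults above mirror speechbrain/m-ctc-t-large, and any
# keyword argument overrides them, e.g.
#
#   config = MCTCTConfig(num_hidden_layers=4, hidden_size=512)
#
# `conv_kernel` must contain exactly `num_conv_layers` entries, otherwise
# __init__ raises the ValueError above.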
| 352 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "trajectory_transformer"
__magic_name__ = ["past_key_values"]
__magic_name__ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , snake_case__=100 , snake_case__=5 , snake_case__=1 , snake_case__=1 , snake_case__=249 , snake_case__=6 , snake_case__=17 , snake_case__=25 , snake_case__=4 , snake_case__=4 , snake_case__=128 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0006 , snake_case__=512 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=True , snake_case__=1 , snake_case__=5_0256 , snake_case__=5_0256 , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Any = action_weight
_lowerCAmelCase : Optional[int] = reward_weight
_lowerCAmelCase : Union[str, Any] = value_weight
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Tuple = block_size
_lowerCAmelCase : List[Any] = action_dim
_lowerCAmelCase : List[Any] = observation_dim
_lowerCAmelCase : Union[str, Any] = transition_dim
_lowerCAmelCase : Tuple = learning_rate
_lowerCAmelCase : int = n_layer
_lowerCAmelCase : Any = n_head
_lowerCAmelCase : Tuple = n_embd
_lowerCAmelCase : Optional[Any] = embd_pdrop
_lowerCAmelCase : Union[str, Any] = attn_pdrop
_lowerCAmelCase : Any = resid_pdrop
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : List[Any] = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = kaiming_initializer_range
_lowerCAmelCase : List[Any] = use_cache
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
| 25 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = params
_lowerCAmelCase : Dict = np.array(snake_case__ )
_lowerCAmelCase : Optional[Any] = np.array([len(snake_case__ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , snake_case__ ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
def a ( self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.params.max_model_input_size
_lowerCAmelCase : int = self.lengths > max_len
logger.info(F'Splitting {sum(snake_case__ )} too long sequences.' )
        def divide_chunks(l , n ):
            return [l[i : i + n] for i in range(0 , len(l ) , n )]
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : str = []
if self.params.mlm:
_lowerCAmelCase : Union[str, Any] = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
_lowerCAmelCase : str = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
_lowerCAmelCase : str = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
_lowerCAmelCase : Optional[Any] = np.insert(snake_case__ , 0 , snake_case__ )
if sub_s[-1] != sep_id:
_lowerCAmelCase : str = np.insert(snake_case__ , len(snake_case__ ) , snake_case__ )
assert len(snake_case__ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(snake_case__ )
new_tok_ids.extend(snake_case__ )
new_lengths.extend([len(snake_case__ ) for l in sub_seqs] )
_lowerCAmelCase : int = np.array(snake_case__ )
_lowerCAmelCase : Any = np.array(snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = len(self )
_lowerCAmelCase : List[str] = self.lengths > 11
_lowerCAmelCase : Optional[Any] = self.token_ids[indices]
_lowerCAmelCase : List[Any] = self.lengths[indices]
_lowerCAmelCase : Optional[int] = len(self )
logger.info(F'Remove {init_size - new_size} too short (<=11 tokens) sequences.' )
def a ( self ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
_lowerCAmelCase : Any = self.params.special_tok_ids['unk_token']
_lowerCAmelCase : Tuple = len(self )
_lowerCAmelCase : Any = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
_lowerCAmelCase : Tuple = (unk_occs / self.lengths) < 0.5
_lowerCAmelCase : Union[str, Any] = self.token_ids[indices]
_lowerCAmelCase : Tuple = self.lengths[indices]
_lowerCAmelCase : Union[str, Any] = len(self )
logger.info(F'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' )
def a ( self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(F'{len(self )} sequences' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = [t[0] for t in batch]
_lowerCAmelCase : str = [t[1] for t in batch]
assert len(snake_case__ ) == len(snake_case__ )
# Max for paddings
_lowerCAmelCase : Dict = max(snake_case__ )
# Pad token ids
if self.params.mlm:
_lowerCAmelCase : Any = self.params.special_tok_ids['pad_token']
else:
_lowerCAmelCase : Any = self.params.special_tok_ids['unk_token']
_lowerCAmelCase : Tuple = [list(t.astype(snake_case__ ) ) + [pad_idx] * (max_seq_len_ - len(snake_case__ )) for t in token_ids]
assert len(tk_ ) == len(snake_case__ )
assert all(len(snake_case__ ) == max_seq_len_ for t in tk_ )
_lowerCAmelCase : Optional[Any] = torch.tensor(tk_ ) # (bs, max_seq_len_)
_lowerCAmelCase : List[str] = torch.tensor(snake_case__ ) # (bs)
return tk_t, lg_t
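# Worked example of the splitting rule in the long-sequence handler above
# (hypothetical token ids): with max_model_input_size = 6, cls_id = 0 and
# sep_id = 1, the sequence [0, 5, 6, 7, 8, 9, 1] is cut into windows of
# max_len - 2 = 4 tokens, [0, 5, 6, 7] and [8, 9, 1], and each window is
# re-wrapped so it still starts with cls and ends with sep:
# [0, 5, 6, 7, 1] and [0, 8, 9, 1].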
| 353 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCAmelCase : Union[str, Any] = 25_00_04
lowerCAmelCase : int = 25_00_20
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = MBartaaTokenizer
__magic_name__ = MBartaaTokenizerFast
__magic_name__ = True
__magic_name__ = True
def a ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : List[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = '<s>'
_lowerCAmelCase : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(snake_case__ ) , 1054 )
def a ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ )
_lowerCAmelCase : Any = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def a ( self ):
        '''simple docstring'''
        # fmt: off
_lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def a ( self ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ )
            # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
_lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=True
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ )
            # Checks it saves with the same files
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=False
_lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
_lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ = "facebook/mbart-large-50-one-to-many-mmt"
__magic_name__ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
__magic_name__ = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
__magic_name__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
@classmethod
def a ( cls ):
'''simple docstring'''
_lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
_lowerCAmelCase : Dict = 1
return cls
def a ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertIn(snake_case__ , self.tokenizer.all_special_ids )
_lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
_lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
_lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertNotIn(self.tokenizer.eos_token , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , snake_case__ )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0]
self.assertEqual(ids[0] , snake_case__ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(snake_case__ )
_lowerCAmelCase : Tuple = MBartaaTokenizer.from_pretrained(snake_case__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' )
_lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
_lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' )
_lowerCAmelCase : str = self.tokenizer(
text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' )
_lowerCAmelCase : List[Any] = targets['input_ids']
_lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(snake_case__ ) , {
# en_XX, A, test, EOS
'input_ids': [[25_0004, 62, 3034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_0001,
} , )
| 25 | 0 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Union[str, Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
lowerCAmelCase : Union[str, Any] = 5_00_03
lowerCAmelCase : Optional[Any] = 5_00_02
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = PLBartTokenizer
__magic_name__ = None
__magic_name__ = False
def a ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : Optional[int] = PLBartTokenizer(snake_case__ , language_codes='base' , keep_accents=snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = PLBartTokenizer(snake_case__ , language_codes='base' , keep_accents=snake_case__ )
_lowerCAmelCase : Dict = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_lowerCAmelCase : List[Any] = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
_lowerCAmelCase : str = tokenizer.vocab_size
_lowerCAmelCase : Tuple = [tokenizer.convert_ids_to_tokens(snake_case__ ) for x in range(end - 4 , snake_case__ )]
self.assertListEqual(snake_case__ , ['__java__', '__python__', '__en_XX__', '<mask>'] )
_lowerCAmelCase : Optional[int] = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
_lowerCAmelCase : List[Any] = tokenizer(snake_case__ ).input_ids
self.assertEqual(
tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ ) , snake_case__ , )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = PLBartTokenizer(snake_case__ , language_codes='multi' , keep_accents=snake_case__ )
_lowerCAmelCase : List[Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_lowerCAmelCase : List[Any] = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Any = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
_lowerCAmelCase : Any = tokenizer.vocab_size
_lowerCAmelCase : int = [tokenizer.convert_ids_to_tokens(snake_case__ ) for x in range(end - 7 , snake_case__ )]
self.assertListEqual(
snake_case__ , ['__java__', '__python__', '__en_XX__', '__javascript__', '__php__', '__ruby__', '__go__'] )
_lowerCAmelCase : List[str] = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
_lowerCAmelCase : str = tokenizer(snake_case__ ).input_ids
self.assertEqual(
tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ ) , snake_case__ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ = "uclanlp/plbart-python-en_XX"
__magic_name__ = [
"def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
"def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
]
__magic_name__ = [
"Returns the maximum value of a b c.",
"Sums the values of a b c.",
]
__magic_name__ = [
1_3_4,
5_4_5_2,
3_3_4_6_0,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
9_8_8,
2_0,
3_3_4_5_6,
1_9,
3_3_4_5_6,
7_7_1,
3_9,
4_2_5_8,
8_8_9,
3_3_1_8,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
2_4_7_1,
2,
PYTHON_CODE,
]
@classmethod
def a ( cls ):
'''simple docstring'''
_lowerCAmelCase : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='base' , src_lang='python' , tgt_lang='en_XX' )
_lowerCAmelCase : Optional[int] = 1
return cls
def a ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'] , 5_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'] , 5_0002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'] , 5_0003 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertIn(snake_case__ , self.tokenizer.all_special_ids )
_lowerCAmelCase : Union[str, Any] = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
_lowerCAmelCase : Optional[Any] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
_lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertNotIn(self.tokenizer.eos_token , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 20]
self.assertIsInstance(src_text[0] , snake_case__ )
_lowerCAmelCase : int = 10
_lowerCAmelCase : str = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , snake_case__ )
self.assertEqual(len(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__'] ) , [5_0004, 5_0001] )
def a ( self ):
'''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def a ( self ):
'''simple docstring'''
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='pt' )
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , EN_CODE )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def a ( self ):
'''simple docstring'''
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 26) , batch.input_ids.shape )
        self.assertEqual((2, 26) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def a ( self ):
'''simple docstring'''
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors='pt' )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors='pt' )
        labels = targets['input_ids']
        batch['decoder_input_ids'] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a ( self ):
'''simple docstring'''
        inputs = self.tokenizer._build_translation_inputs(
            'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='java' )
        self.assertEqual(
            nested_simplify(inputs ) , {
# A, test, EOS, en_XX
'input_ids': [[150, 242, 2, 5_0003]],
'attention_mask': [[1, 1, 1, 1]],
# java
'forced_bos_token_id': 5_0001,
} , )
| 354 |
'''simple docstring'''
from math import isqrt
def is_prime(number ):
    """simple docstring"""
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )


def solution(max_prime=1_0**6 ):
    """simple docstring"""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
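# Illustrative check (not in the original file): the candidates visited by
# `solution` are exactly the differences of consecutive cubes, since
# (n + 1)**3 - n**3 == 3*n*n + 3*n + 1, which the loop reproduces by starting
# at 7 and adding 6 * cube_index each step (7, 19, 37, 61, ...).
if __name__ == "__main__":
    assert all((n + 1) ** 3 - n**3 == 3 * n * n + 3 * n + 1 for n in range(1, 100))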
| 25 | 0 |
'''simple docstring'''
from PIL import Image
def change_brightness(img , level ):
    """simple docstring"""

    def brightness(c ) -> float:
        return 1_2_8 + level + (c - 1_2_8)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )


if __name__ == "__main__":
    # Load image
    with Image.open("""image_data/lena.jpg""") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 1_00)
        bright_img.save("""image_data/lena_brightness.png""", format="""png""")
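# A small self-contained check (illustrative; it assumes only Pillow, not the
# image files used above): `Image.point` applies `brightness` to every pixel
# value, so a level of 50 shifts a uniform gray image from 100 up to 150.
if __name__ == "__main__":
    demo = Image.new("L", (2, 2), color=1_00)
    assert list(change_brightness(demo, 5_0).getdata()) == [150, 150, 150, 150]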
| 355 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class UpperCamelCase__ ( PretrainedConfig ):
"""simple docstring"""
__magic_name__ = "mvp"
__magic_name__ = ["past_key_values"]
__magic_name__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=5_0267 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=100 , prompt_mid_dim=800 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                'The config can simply be saved and uploaded again to be fixed.' )
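# A minimal usage sketch (illustrative, not part of the original module; it only
# restates the defaults defined above): instantiating the config with no
# arguments yields the base MVP hyperparameters, and keyword overrides flow
# through to the derived attributes as usual.
if __name__ == "__main__":
    config = UpperCamelCase__()
    assert config.d_model == 1024 and config.encoder_layers == 12
    small = UpperCamelCase__(encoder_layers=6, decoder_layers=6)
    assert small.num_hidden_layers == 6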
| 25 | 0 |
'''simple docstring'''
def matching_min_vertex_cover(graph ):
    """simple docstring"""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph )
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices


def get_edges(graph ):
    """simple docstring"""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 356 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        """The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
    )
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
"""7B""": 1_10_08,
"""13B""": 1_38_24,
"""30B""": 1_79_20,
"""65B""": 2_20_16,
"""70B""": 2_86_72,
}
NUM_SHARDS = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def compute_intermediate_size(n , ffn_dim_multiplier=1 , multiple_of=2_5_6 ):
    """simple docstring"""
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
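# Quick sanity check (illustrative, not in the original script): with dim = 4096
# and the default multipliers, this reproduces the 7B intermediate size listed
# in INTERMEDIATE_SIZE_MAP above.
assert compute_intermediate_size(4096) == 1_10_08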
def read_json(path ):
    """simple docstring"""
    with open(path , 'r' ) as f:
        return json.load(f )


def write_json(text , path ):
    """simple docstring"""
    with open(path , 'w' ) as f:
        json.dump(text , f )
def write_model(model_path , input_base_path , model_size , safe_serialization=True ):
    """simple docstring"""
    os.makedirs(model_path , exist_ok=True )
    tmp_model_path = os.path.join(model_path , 'tmp' )
    os.makedirs(tmp_model_path , exist_ok=True )
    params = read_json(os.path.join(input_base_path , 'params.json' ) )
    num_shards = NUM_SHARDS[model_size]
    n_layers = params['n_layers']
    n_heads = params['n_heads']
    n_heads_per_shard = n_heads // num_shards
    dim = params['dim']
    dims_per_head = dim // n_heads
    base = 10_000.0
    inv_freq = 1.0 / (base ** (torch.arange(0 , dims_per_head , 2 ).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params['n_kv_heads']  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w , n_heads=n_heads , dim1=dim , dim2=dim ):
        return w.view(n_heads , dim1 // n_heads // 2 , 2 , dim2 ).transpose(1 , 2 ).reshape(dim1 , dim2 )
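    # Shape sketch (illustrative, not in the original script): for dim = 4096 and
    # n_heads = 32, `permute` views a (4096, 4096) projection as (32, 64, 2, 4096)
    # to interleave the two rotary halves per head, then reshapes it back to
    # (4096, 4096) in the Transformers rotary layout.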
    print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path , 'consolidated.00.pth' ) , map_location='cpu' )
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path , f'consolidated.{i:02d}.pth' ) , map_location='cpu' )
            for i in range(num_shards )
        ]
    param_count = 0
    index_dict = {'weight_map': {}}
    for layer_i in range(n_layers ):
        filename = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
                    loaded[f'layers.{layer_i}.attention.wq.weight'] ),
                f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
                    loaded[f'layers.{layer_i}.attention.wk.weight'] ),
                f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
                f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
                f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
                f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
                f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
                f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
                f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
                    f'layers.{layer_i}.attention_norm.weight'
                ].clone(),
                f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
                    f'layers.{layer_i}.ffn_norm.weight'
                ].clone(),
            }
            state_dict[f'model.layers.{layer_i}.self_attn.q_proj.weight'] = permute(
                torch.cat(
                    [
                        loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(n_heads_per_shard , dims_per_head , dim )
                        for i in range(num_shards )
                    ] , dim=0 , ).reshape(dim , dim ) )
            state_dict[f'model.layers.{layer_i}.self_attn.k_proj.weight'] = permute(
                torch.cat(
                    [
                        loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
                            num_local_key_value_heads , dims_per_head , dim )
                        for i in range(num_shards )
                    ] , dim=0 , ).reshape(key_value_dim , dim ) , num_key_value_heads , key_value_dim , dim , )
            state_dict[f'model.layers.{layer_i}.self_attn.v_proj.weight'] = torch.cat(
                [
                    loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
                        num_local_key_value_heads , dims_per_head , dim )
                    for i in range(num_shards )
                ] , dim=0 , ).reshape(key_value_dim , dim )
            state_dict[f'model.layers.{layer_i}.self_attn.o_proj.weight'] = torch.cat(
                [loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(num_shards )] , dim=1 )
            state_dict[f'model.layers.{layer_i}.mlp.gate_proj.weight'] = torch.cat(
                [loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(num_shards )] , dim=0 )
            state_dict[f'model.layers.{layer_i}.mlp.down_proj.weight'] = torch.cat(
                [loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(num_shards )] , dim=1 )
            state_dict[f'model.layers.{layer_i}.mlp.up_proj.weight'] = torch.cat(
                [loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(num_shards )] , dim=0 )
        state_dict[f'model.layers.{layer_i}.self_attn.rotary_emb.inv_freq'] = inv_freq
        for k, v in state_dict.items():
            index_dict['weight_map'][k] = filename
            param_count += v.numel()
        torch.save(state_dict , os.path.join(tmp_model_path , filename ) )
    filename = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
    if model_size == "7B":
        # Unsharded
        state_dict = {
            'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
            'model.norm.weight': loaded['norm.weight'],
            'lm_head.weight': loaded['output.weight'],
        }
    else:
        state_dict = {
            'model.norm.weight': loaded[0]['norm.weight'],
            'model.embed_tokens.weight': torch.cat(
                [loaded[i]['tok_embeddings.weight'] for i in range(num_shards )] , dim=1 ),
            'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(num_shards )] , dim=0 ),
        }
    for k, v in state_dict.items():
        index_dict['weight_map'][k] = filename
        param_count += v.numel()
    torch.save(state_dict , os.path.join(tmp_model_path , filename ) )
    # Write configs
    index_dict['metadata'] = {'total_size': param_count * 2}
    write_json(index_dict , os.path.join(tmp_model_path , 'pytorch_model.bin.index.json' ) )
    ffn_dim_multiplier = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
    multiple_of = params['multiple_of'] if 'multiple_of' in params else 2_5_6
    config = LlamaConfig(
        hidden_size=dim , intermediate_size=compute_intermediate_size(dim , ffn_dim_multiplier , multiple_of ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=num_key_value_heads , )
    config.save_pretrained(tmp_model_path )
    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()
    print('Loading the checkpoint in a Llama model.' )
    model = LlamaForCausalLM.from_pretrained(tmp_model_path , torch_dtype=torch.float16 , low_cpu_mem_usage=True )
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print('Saving in the Transformers format.' )
    model.save_pretrained(model_path , safe_serialization=safe_serialization )
    shutil.rmtree(tmp_model_path )
def write_tokenizer(tokenizer_path , input_tokenizer_path ):
    """simple docstring"""
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
    tokenizer = tokenizer_class(input_tokenizer_path )
    tokenizer.save_pretrained(tokenizer_path )
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
    parser.add_argument(
        '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
    parser.add_argument(
        '--output_dir' , help='Location to write HF model and tokenizer' , )
    parser.add_argument('--safe_serialization' , type=bool , help='Whether or not to save using `safetensors`.' )
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
    spm_path = os.path.join(args.input_dir , 'tokenizer.model' )
    write_tokenizer(args.output_dir , spm_path )
if __name__ == "__main__":
main()
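# Example invocation (illustrative; the paths below are placeholders, not from
# the original file):
#
#     python convert_llama_weights_to_hf.py \
#         --input_dir /path/to/downloaded/llama/weights \
#         --model_size 7B \
#         --output_dir /path/to/output/llama-7b-hf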
| 25 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer(self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        '''simple docstring'''
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text
    def test_full_tokenizer(self ):
'''simple docstring'''
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
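        # Illustrative trace (not part of the original test): with the merge rules
        # written in setUp, "react" only matches the "r e" merge, so it splits into
        # the learned subwords re@@ a@@ c@@ t, while "readapt" reuses "re@@" plus
        # the "a d" and "ad apt</w>" merges to yield re@@ adapt.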
| 357 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = True
__magic_name__ = None
__magic_name__ = 1
__magic_name__ = None
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
def a ( self ):
'''simple docstring'''
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
| 25 | 0 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
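        # Illustrative arithmetic (not in the original test): with the defaults
        # above, (30 // 2) ** 2 = 225 patches, so seq_length = 225 + 2 = 227 once
        # the [CLS] and distillation tokens are added.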
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        '''simple docstring'''
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model(self , config , pixel_values , labels ):
        '''simple docstring'''
        model = DeiTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling(self , config , pixel_values , labels ):
        '''simple docstring'''
        model = DeiTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = DeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )
def a ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def a ( self ):
'''simple docstring'''
pass
def a ( self ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def a ( self ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def a ( self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def a ( self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
def a ( self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
def a ( self ):
'''simple docstring'''
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
loss.backward()
def a ( self ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
loss.backward()
def a ( self ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
            {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
            {'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
                    config.problem_type = problem_type['title']
                    config.num_labels = problem_type['num_labels']
                    model = model_class(config )
                    model.to(torch_device )
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
                    if problem_type["num_labels"] > 1:
                        inputs['labels'] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
                    inputs['labels'] = inputs['labels'].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss = model(**inputs ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def a ( self ):
'''simple docstring'''
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def a ( self ):
'''simple docstring'''
        model = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def a ( self ):
'''simple docstring'''
        model = DeiTModel.from_pretrained(
            'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.float16 , device_map='auto' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
| 358 |
'''simple docstring'''
lowerCAmelCase : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCAmelCase : List[str] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
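# A minimal sketch (assumed; `render_notebook_cell` is hypothetical and not part
# of the real doc-building utilities) of how the placeholder map above would
# typically be applied before executing notebook cells:
def render_notebook_cell(content: str) -> str:
    for placeholder, value in black_avoid_patterns.items():
        content = content.replace(placeholder, value)
    return content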
| 25 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
        qformer_tokenizer = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
        processor = InstructBlipProcessor(image_processor , tokenizer , qformer_tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer(self , **kwargs ):
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor(self , **kwargs ):
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def get_qformer_tokenizer(self , **kwargs ):
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).qformer_tokenizer
    def tearDown(self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def a ( self ):
'''simple docstring'''
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
        self.assertIsInstance(processor.qformer_tokenizer , PreTrainedTokenizerFast )
def a ( self ):
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='np' )
        input_processor = processor(images=image_input , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a ( self ):
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str )
        encoded_tokens = tokenizer(input_str , return_token_type_ids=False )
        encoded_tokens_qformer = qformer_tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def a ( self ):
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
def a ( self ):
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
def a ( self ):
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
| 359 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    """configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_resnet"""] = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_resnet"""] = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_resnet"""] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
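    # Illustrative note (not part of the module): with this lazy structure,
    # `from transformers.models.resnet import ResNetConfig` eagerly imports only
    # the configuration submodule; the torch/TF/Flax modeling files are loaded on
    # first attribute access, keeping the top-level import cheap.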
| 25 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize_and_center_crop=True , size=None , crop_pct=0.9 , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        '''simple docstring'''
        size = size if size is not None else {'shortest_edge': 30}
        crop_size = crop_size if crop_size is not None else {'height': 30, 'width': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self ):
        '''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class UpperCamelCase__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp(self ):
        '''simple docstring'''
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
    @property
    def image_processor_dict(self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
def a ( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'do_resize_and_center_crop' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'crop_pct' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
def a ( self ):
'''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 30} )
        self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def a ( self ):
'''simple docstring'''
pass
def a ( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def a ( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def a ( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 360 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase__ ( BackboneConfigMixin , PretrainedConfig ):
"""simple docstring"""
__magic_name__ = "nat"
__magic_name__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [F'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
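    # Illustrative arithmetic (not in the original file): with the defaults above,
    # hidden_size = 64 * 2 ** (4 - 1) = 512, and stage_names is
    # ['stem', 'stage1', 'stage2', 'stage3', 'stage4'].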
| 25 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script' , type=str , help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )
    # rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
    return parser.parse_args()
def main():
    """simple docstring"""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
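# Example invocation (illustrative; the training script name and its flags are
# placeholders, not from the original file):
#
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
#
# Everything after the training script is forwarded untouched via sys.argv, plus
# the injected `--tpu_num_cores` flag shown in `main` above.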
| 361 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""roberta-base""": 5_12,
"""roberta-large""": 5_12,
"""roberta-large-mnli""": 5_12,
"""distilroberta-base""": 5_12,
"""roberta-base-openai-detector""": 5_12,
"""roberta-large-openai-detector""": 5_12,
}
class UpperCamelCase__ ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
    def mask_token( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
_lowerCAmelCase : Tuple = value
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case__ , **snake_case__ )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def a ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 25 | 0 |
'''simple docstring'''
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = len(_A )
_lowerCAmelCase : List[Any] = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # a sum of zero (0) can always be formed by taking no elements,
    # so subset[i][0] is True for every i
for i in range(arr_len + 1 ):
_lowerCAmelCase : Dict = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
_lowerCAmelCase : int = False
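    # 0/1 subset-sum DP: subset[i][j] is True when some subset of the first i
    # elements sums to j; each element is either skipped or, when it fits, taken.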
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
_lowerCAmelCase : List[Any] = subset[i - 1][j]
if arr[i - 1] <= j:
_lowerCAmelCase : Union[str, Any] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362 |
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = 0 # The first color of the flag.
lowerCAmelCase : Optional[int] = 1 # The second color of the flag.
lowerCAmelCase : int = 2 # The third color of the flag.
lowerCAmelCase : Any = (red, white, blue)
def lowercase (_A ):
"""simple docstring"""
if not sequence:
return []
if len(_A ) == 1:
return list(_A )
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : List[str] = len(_A ) - 1
_lowerCAmelCase : Optional[Any] = 0
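    # Three-way partition (Dijkstra's Dutch national flag): everything before
    # `low` is colors[0], everything after `high` is colors[2], and `mid`
    # scans the unknown middle region in a single pass.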
while mid <= high:
if sequence[mid] == colors[0]:
_lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[high], sequence[mid]
high -= 1
else:
        _lowerCAmelCase : Optional[int] = f'The elements inside the sequence must contain only {colors} values'
raise ValueError(_A )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : str = input("""Enter numbers separated by commas:\n""").strip()
lowerCAmelCase : Dict = [int(item.strip()) for item in user_input.split(""",""")]
print(F'''{dutch_national_flag_sort(unsorted)}''')
| 25 | 0 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def lowercase (_A ):
"""simple docstring"""
if hor == 1_2_8:
_lowerCAmelCase : Dict = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
_lowerCAmelCase : str = (3_2, 1_2_8, 2_5_6)
_lowerCAmelCase : str = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 3_2:
_lowerCAmelCase : Union[str, Any] = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
_lowerCAmelCase : Tuple = (3_2, 6_4, 1_2_8, 2_5_6)
_lowerCAmelCase : Union[str, Any] = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
_lowerCAmelCase : Optional[int] = torch.load(f'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch' )
_lowerCAmelCase : Optional[int] = model.state_dict()
_lowerCAmelCase : Any = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 1_4,
'out_channels': 1_4,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 6_5_5_3_6,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
_lowerCAmelCase : List[Any] = UNetaDModel(**_A )
print(f'length of state dict: {len(state_dict.keys() )}' )
print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
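    # The two state dicts are assumed to enumerate parameters in the same
    # order, so keys are paired off positionally with zip.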
_lowerCAmelCase : List[str] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
_lowerCAmelCase : str = state_dict.pop(_A )
hf_value_function.load_state_dict(_A )
torch.save(hf_value_function.state_dict() , f'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin' )
with open(f'hub/hopper-medium-v2/unet/hor{hor}/config.json' , 'w' ) as f:
json.dump(_A , _A )
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Dict = {
'in_channels': 1_4,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (3_2, 6_4, 1_2_8, 2_5_6),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 6_5_5_3_6,
'out_channels': 1_4,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
_lowerCAmelCase : Optional[Any] = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
_lowerCAmelCase : str = model
_lowerCAmelCase : Optional[int] = UNetaDModel(**_A )
print(f'length of state dict: {len(state_dict.keys() )}' )
print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
_lowerCAmelCase : int = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
_lowerCAmelCase : List[str] = state_dict.pop(_A )
hf_value_function.load_state_dict(_A )
torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
json.dump(_A , _A )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 363 |
'''simple docstring'''
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Optional[int] = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
_lowerCAmelCase : int = 6
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : Optional[int] = 1_9_0_1
_lowerCAmelCase : Optional[Any] = 0
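    # Jan 1, 1901 was a Tuesday, so the century's first Sunday fell on Jan 6;
    # jumping a week at a time visits every Sunday, and we count those that
    # land on the first of a month (Project Euler 19).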
while year < 2_0_0_1:
day += 7
if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
_lowerCAmelCase : List[str] = day - days_per_month[month - 2]
elif day > 2_9 and month == 2:
month += 1
_lowerCAmelCase : List[str] = day - 2_9
else:
if day > days_per_month[month - 1]:
month += 1
_lowerCAmelCase : List[str] = day - days_per_month[month - 2]
if month > 1_2:
year += 1
_lowerCAmelCase : Optional[int] = 1
if year < 2_0_0_1 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
| 25 | 0 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Tuple = """Hello world! cécé herlolip"""
def lowercase (_A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : Any = FairseqRobertaModel.from_pretrained(_A )
roberta.eval() # disable dropout
_lowerCAmelCase : Tuple = roberta.model.encoder.sentence_encoder
_lowerCAmelCase : List[Any] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
_lowerCAmelCase : List[Any] = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our RoBERTa config:' , _A )
_lowerCAmelCase : List[str] = XLMRobertaXLForSequenceClassification(_A ) if classification_head else XLMRobertaXLForMaskedLM(_A )
model.eval()
# Now let's copy all the weights.
# Embeddings
_lowerCAmelCase : Optional[int] = roberta_sent_encoder.embed_tokens.weight
_lowerCAmelCase : Optional[Any] = roberta_sent_encoder.embed_positions.weight
_lowerCAmelCase : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
_lowerCAmelCase : Dict = roberta_sent_encoder.layer_norm.weight
_lowerCAmelCase : List[Any] = roberta_sent_encoder.layer_norm.bias
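    # Copy the encoder stack layer by layer: attention layer norm, q/k/v
    # projections, attention output, final layer norm, then the two
    # feed-forward projections.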
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_lowerCAmelCase : BertLayer = model.roberta.encoder.layer[i]
_lowerCAmelCase : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
_lowerCAmelCase : RobertaAttention = layer.attention
_lowerCAmelCase : int = roberta_layer.self_attn_layer_norm.weight
_lowerCAmelCase : str = roberta_layer.self_attn_layer_norm.bias
# self attention
_lowerCAmelCase : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
_lowerCAmelCase : Optional[Any] = roberta_layer.self_attn.q_proj.weight
_lowerCAmelCase : Union[str, Any] = roberta_layer.self_attn.q_proj.bias
_lowerCAmelCase : Optional[Any] = roberta_layer.self_attn.k_proj.weight
_lowerCAmelCase : Dict = roberta_layer.self_attn.k_proj.bias
_lowerCAmelCase : List[Any] = roberta_layer.self_attn.v_proj.weight
_lowerCAmelCase : Optional[int] = roberta_layer.self_attn.v_proj.bias
# self-attention output
_lowerCAmelCase : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
_lowerCAmelCase : Tuple = roberta_layer.self_attn.out_proj.weight
_lowerCAmelCase : Dict = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
_lowerCAmelCase : Dict = roberta_layer.final_layer_norm.weight
_lowerCAmelCase : Union[str, Any] = roberta_layer.final_layer_norm.bias
# intermediate
_lowerCAmelCase : BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        _lowerCAmelCase : List[Any] = roberta_layer.fc1.weight
        _lowerCAmelCase : int = roberta_layer.fc1.bias
# output
_lowerCAmelCase : BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        _lowerCAmelCase : Tuple = roberta_layer.fc2.weight
        _lowerCAmelCase : Tuple = roberta_layer.fc2.bias
# end of layer
if classification_head:
_lowerCAmelCase : str = roberta.model.classification_heads['mnli'].dense.weight
_lowerCAmelCase : Optional[int] = roberta.model.classification_heads['mnli'].dense.bias
_lowerCAmelCase : Optional[Any] = roberta.model.classification_heads['mnli'].out_proj.weight
_lowerCAmelCase : Optional[int] = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
_lowerCAmelCase : List[Any] = roberta.model.encoder.lm_head.dense.weight
_lowerCAmelCase : Dict = roberta.model.encoder.lm_head.dense.bias
_lowerCAmelCase : Any = roberta.model.encoder.lm_head.layer_norm.weight
_lowerCAmelCase : Dict = roberta.model.encoder.lm_head.layer_norm.bias
_lowerCAmelCase : int = roberta.model.encoder.lm_head.weight
_lowerCAmelCase : List[Any] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
_lowerCAmelCase : torch.Tensor = roberta.encode(_A ).unsqueeze(0 ) # batch of size 1
_lowerCAmelCase : int = model(_A )[0]
if classification_head:
_lowerCAmelCase : List[Any] = roberta.model.classification_heads['mnli'](roberta.extract_features(_A ) )
else:
_lowerCAmelCase : Dict = roberta.model(_A )[0]
print(our_output.shape , their_output.shape )
_lowerCAmelCase : List[str] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
_lowerCAmelCase : List[Any] = torch.allclose(_A , _A , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
pathlib.Path(_A ).mkdir(parents=_A , exist_ok=_A )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_A )
if __name__ == "__main__":
lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 364 |
'''simple docstring'''
def lowercase (_A = 1_0_0_0_0_0_0 ):
"""simple docstring"""
_lowerCAmelCase : Any = set(range(3 , _A , 2 ) )
primes.add(2 )
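    # Sieve of Eratosthenes over the odds, then Euler's product formula
    # phi(n) = n * prod over primes p dividing n of (1 - 1/p); summing
    # phi(2..limit) counts the reduced proper fractions (Project Euler 72).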
for p in range(3 , _A , 2 ):
if p not in primes:
continue
        primes.difference_update(set(range(p * p , _A , p ) ) )
    _lowerCAmelCase : Union[str, Any] = [float(n ) for n in range(_A + 1 )]
for p in primes:
        for n in range(p , _A + 1 , p ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 25 | 0 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
lowerCAmelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , unet=snake_case__ , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , )
def a ( self , snake_case__ = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case__ )
def a ( self ):
'''simple docstring'''
self.enable_attention_slicing(snake_case__ )
@torch.no_grad()
def __call__( self , snake_case__ , snake_case__ = 512 , snake_case__ = 512 , snake_case__ = 50 , snake_case__ = 7.5 , snake_case__ = None , snake_case__ = 1 , snake_case__ = 0.0 , snake_case__ = None , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , snake_case__ = None , snake_case__ = 1 , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
if isinstance(snake_case__ , snake_case__ ):
_lowerCAmelCase : Tuple = 1
elif isinstance(snake_case__ , snake_case__ ):
_lowerCAmelCase : List[str] = len(snake_case__ )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(snake_case__ )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case__ , snake_case__ ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(snake_case__ )}.' )
# get prompt text embeddings
_lowerCAmelCase : Union[str, Any] = self.tokenizer(
snake_case__ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
_lowerCAmelCase : List[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_lowerCAmelCase : Union[str, Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
_lowerCAmelCase : List[str] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
_lowerCAmelCase : Optional[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase : Any = text_embeddings.shape
_lowerCAmelCase : int = text_embeddings.repeat(1 , snake_case__ , 1 )
_lowerCAmelCase : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase : Tuple = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase : List[str]
if negative_prompt is None:
_lowerCAmelCase : Dict = ['']
elif type(snake_case__ ) is not type(snake_case__ ):
raise TypeError(
F'`negative_prompt` should be the same type to `prompt`, but got {type(snake_case__ )} !='
F' {type(snake_case__ )}.' )
elif isinstance(snake_case__ , snake_case__ ):
_lowerCAmelCase : List[str] = [negative_prompt]
elif batch_size != len(snake_case__ ):
raise ValueError(
F'`negative_prompt`: {negative_prompt} has batch size {len(snake_case__ )}, but `prompt`:'
F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
' the batch size of `prompt`.' )
else:
_lowerCAmelCase : Optional[Any] = negative_prompt
_lowerCAmelCase : Tuple = text_input_ids.shape[-1]
_lowerCAmelCase : Optional[Any] = self.tokenizer(
snake_case__ , padding='max_length' , max_length=snake_case__ , truncation=snake_case__ , return_tensors='pt' , )
_lowerCAmelCase : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase : Dict = uncond_embeddings.shape[1]
_lowerCAmelCase : Any = uncond_embeddings.repeat(snake_case__ , snake_case__ , 1 )
_lowerCAmelCase : List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase : Any = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
_lowerCAmelCase : Optional[int] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_lowerCAmelCase : Tuple = torch.randn(
snake_case__ , generator=snake_case__ , device='cpu' , dtype=snake_case__ ).to(self.device )
_lowerCAmelCase : Optional[Any] = torch.randn(snake_case__ , generator=snake_case__ , device='cpu' , dtype=snake_case__ ).to(
self.device )
else:
_lowerCAmelCase : List[str] = torch.randn(
snake_case__ , generator=snake_case__ , device=self.device , dtype=snake_case__ )
_lowerCAmelCase : List[str] = torch.randn(snake_case__ , generator=snake_case__ , device=self.device , dtype=snake_case__ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
_lowerCAmelCase : str = latents_reference.to(self.device )
_lowerCAmelCase : Dict = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
_lowerCAmelCase : Dict = (latents_shape[3] - latents_shape_reference[3]) // 2
_lowerCAmelCase : Tuple = (latents_shape[2] - latents_shape_reference[2]) // 2
_lowerCAmelCase : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
_lowerCAmelCase : Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
_lowerCAmelCase : int = 0 if dx < 0 else dx
_lowerCAmelCase : str = 0 if dy < 0 else dy
_lowerCAmelCase : Tuple = max(-dx , 0 )
_lowerCAmelCase : str = max(-dy , 0 )
# import pdb
# pdb.set_trace()
_lowerCAmelCase : Union[str, Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(snake_case__ )
# Some schedulers like PNDM have timesteps as arrays
        # It's more efficient to move all timesteps to the correct device beforehand
_lowerCAmelCase : Dict = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase : Dict = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase : Optional[int] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase : Optional[int] = {}
if accepts_eta:
_lowerCAmelCase : Optional[Any] = eta
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : List[str] = self.scheduler.scale_model_input(snake_case__ , snake_case__ )
# predict the noise residual
_lowerCAmelCase : Union[str, Any] = self.unet(snake_case__ , snake_case__ , encoder_hidden_states=snake_case__ ).sample
# perform guidance
if do_classifier_free_guidance:
_lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
_lowerCAmelCase : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[Any] = self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case__ , snake_case__ , snake_case__ )
_lowerCAmelCase : Any = 1 / 0.1_8215 * latents
_lowerCAmelCase : Optional[int] = self.vae.decode(snake_case__ ).sample
_lowerCAmelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
_lowerCAmelCase : Union[str, Any] = self.feature_extractor(self.numpy_to_pil(snake_case__ ) , return_tensors='pt' ).to(
self.device )
_lowerCAmelCase : Dict = self.safety_checker(
images=snake_case__ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
_lowerCAmelCase : Dict = None
if output_type == "pil":
_lowerCAmelCase : str = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=snake_case__ , nsfw_content_detected=snake_case__ )
| 365 |
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase : Tuple = """src/transformers"""
# Pattern that looks at the indentation in a line.
lowerCAmelCase : str = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase : str = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""")
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : int = _re_indent.search(_A )
return "" if search is None else search.groups()[0]
def lowercase (_A , _A="" , _A=None , _A=None ):
"""simple docstring"""
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_A ):
index += 1
_lowerCAmelCase : Dict = ['\n'.join(lines[:index] )]
else:
_lowerCAmelCase : str = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : List[Any] = [lines[index]]
index += 1
while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_A ) )
if index < len(_A ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Union[str, Any] = []
else:
blocks.append('\n'.join(_A ) )
_lowerCAmelCase : List[str] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_A ) > 0:
blocks.append('\n'.join(_A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_A ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def lowercase (_A ):
"""simple docstring"""
def _inner(_A ):
return key(_A ).lower().replace('_' , '' )
return _inner
def lowercase (_A , _A=None ):
"""simple docstring"""
def noop(_A ):
return x
if key is None:
_lowerCAmelCase : List[Any] = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : List[Any] = [obj for obj in objects if key(_A ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : Tuple = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : List[str] = [obj for obj in objects if not key(_A )[0].isupper()]
_lowerCAmelCase : Dict = ignore_underscore(_A )
return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A )
def lowercase (_A ):
"""simple docstring"""
def _replace(_A ):
_lowerCAmelCase : Dict = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_lowerCAmelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : int = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]"
_lowerCAmelCase : Tuple = import_statement.split('\n' )
if len(_A ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1
_lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        _lowerCAmelCase : Dict = sort_objects(_A , key=lambda x : x[1] )
_lowerCAmelCase : Tuple = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_A ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : Tuple = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : List[str] = keys[:-1]
_lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(_A )] )
return "\n".join(_A )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A )
return import_statement
def lowercase (_A , _A=True ):
"""simple docstring"""
with open(_A , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase : Tuple = split_code_in_indented_blocks(
_A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_A ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCAmelCase : Tuple = main_blocks[block_idx]
_lowerCAmelCase : int = block.split('\n' )
# Get to the start of the imports.
_lowerCAmelCase : Tuple = 0
while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase : Dict = len(_A )
else:
line_idx += 1
if line_idx >= len(_A ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] )
_lowerCAmelCase : Tuple = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
_lowerCAmelCase : List[Any] = split_code_in_indented_blocks(_A , indent_level=_A )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase : Optional[int] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None]
        _lowerCAmelCase : Optional[int] = [x[0] for x in sorted(_A , key=lambda x : x[1] )]
        # We reorder the blocks, leaving empty lines/comments as they were and reordering the rest.
_lowerCAmelCase : int = 0
_lowerCAmelCase : Optional[Any] = []
for i in range(len(_A ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(_A )
count += 1
# And we put our main block back together with its first and last line.
_lowerCAmelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(_A ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(_A , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(_A ) )
def lowercase (_A=True ):
"""simple docstring"""
_lowerCAmelCase : int = []
for root, _, files in os.walk(_A ):
if "__init__.py" in files:
_lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A )
if result:
_lowerCAmelCase : Optional[int] = [os.path.join(_A , '__init__.py' )]
if len(_A ) > 0:
raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' )
if __name__ == "__main__":
lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowerCAmelCase : List[str] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 25 | 0 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCAmelCase : Tuple = """src/diffusers"""
lowerCAmelCase : Dict = """."""
# This is to make sure the diffusers module imported is the one in the repo.
lowerCAmelCase : Union[str, Any] = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowerCAmelCase : List[Any] = spec.loader.load_module()
def lowercase (_A , _A ):
"""simple docstring"""
return line.startswith(_A ) or len(_A ) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$' , _A ) is not None
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = object_name.split('.' )
_lowerCAmelCase : Optional[Any] = 0
# First let's find the module where our object lives.
_lowerCAmelCase : Dict = parts[i]
while i < len(_A ) and not os.path.isfile(os.path.join(_A , f'{module}.py' ) ):
i += 1
if i < len(_A ):
_lowerCAmelCase : Dict = os.path.join(_A , parts[i] )
if i >= len(_A ):
raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
with open(os.path.join(_A , f'{module}.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
_lowerCAmelCase : List[Any] = f.readlines()
# Now let's find the class / func in the code!
_lowerCAmelCase : Dict = ''
_lowerCAmelCase : Tuple = 0
for name in parts[i + 1 :]:
while (
line_index < len(_A ) and re.search(rf'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(_A ):
raise ValueError(f' {object_name} does not match any function or class in {module}.' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_lowerCAmelCase : Tuple = line_index
while line_index < len(_A ) and _should_continue(lines[line_index] , _A ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_lowerCAmelCase : Dict = lines[start_index:line_index]
return "".join(_A )
lowerCAmelCase : Union[str, Any] = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
lowerCAmelCase : Optional[int] = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""")
lowerCAmelCase : str = re.compile(r"""<FILL\s+[^>]*>""")
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = code.split('\n' )
_lowerCAmelCase : Optional[Any] = 0
while idx < len(_A ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(_A ):
return re.search(r'^(\s*)\S' , lines[idx] ).groups()[0]
return ""
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = len(get_indent(_A ) ) > 0
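    # Indented snippets are method bodies; wrap them in a dummy class so black
    # can parse them, then strip the wrapper from the formatted result.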
if has_indent:
_lowerCAmelCase : int = f'class Bla:\n{code}'
_lowerCAmelCase : Union[str, Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 , preview=_A )
_lowerCAmelCase : List[str] = black.format_str(_A , mode=_A )
_lowerCAmelCase : List[str] = style_docstrings_in_code(_A )
return result[len('class Bla:\n' ) :] if has_indent else result
def lowercase (_A , _A=False ):
"""simple docstring"""
with open(_A , 'r' , encoding='utf-8' , newline='\n' ) as f:
_lowerCAmelCase : str = f.readlines()
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : str = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(_A ):
_lowerCAmelCase : List[str] = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_lowerCAmelCase : Optional[Any] = search.groups()
_lowerCAmelCase : Optional[int] = find_code_in_diffusers(_A )
_lowerCAmelCase : List[Any] = get_indent(_A )
_lowerCAmelCase : str = line_index + 1 if indent == theoretical_indent else line_index + 2
_lowerCAmelCase : Union[str, Any] = theoretical_indent
_lowerCAmelCase : Dict = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
_lowerCAmelCase : Optional[int] = True
while line_index < len(_A ) and should_continue:
line_index += 1
if line_index >= len(_A ):
break
_lowerCAmelCase : Any = lines[line_index]
_lowerCAmelCase : Optional[int] = _should_continue(_A , _A ) and re.search(f'^{indent}# End copy' , _A ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_lowerCAmelCase : Union[str, Any] = lines[start_index:line_index]
_lowerCAmelCase : Optional[int] = ''.join(_A )
# Remove any nested `Copied from` comments to avoid circular copies
_lowerCAmelCase : List[Any] = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(_A ) is None]
_lowerCAmelCase : List[Any] = '\n'.join(_A )
# Before comparing, use the `replace_pattern` on the original code.
if len(_A ) > 0:
_lowerCAmelCase : List[Any] = replace_pattern.replace('with' , '' ).split(',' )
_lowerCAmelCase : Union[str, Any] = [_re_replace_pattern.search(_A ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_lowerCAmelCase : Tuple = pattern.groups()
_lowerCAmelCase : Optional[Any] = re.sub(_A , _A , _A )
if option.strip() == "all-casing":
_lowerCAmelCase : Any = re.sub(obja.lower() , obja.lower() , _A )
_lowerCAmelCase : Optional[int] = re.sub(obja.upper() , obja.upper() , _A )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_lowerCAmelCase : Any = blackify(lines[start_index - 1] + theoretical_code )
_lowerCAmelCase : Optional[int] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_lowerCAmelCase : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:]
_lowerCAmelCase : str = start_index + 1
if overwrite and len(_A ) > 0:
# Warn the user a file has been modified.
print(f'Detected changes, rewriting {filename}.' )
with open(_A , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(_A )
return diffs
def lowercase (_A = False ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = glob.glob(os.path.join(_A , '**/*.py' ) , recursive=_A )
_lowerCAmelCase : Union[str, Any] = []
for filename in all_files:
_lowerCAmelCase : int = is_copy_consistent(_A , _A )
diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(_A ) > 0:
_lowerCAmelCase : Any = '\n'.join(_A )
raise Exception(
'Found the following copy inconsistencies:\n'
+ diff
+ '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowerCAmelCase : Tuple = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 366 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = KandinskyVaaInpaintPipeline
__magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
__magic_name__ = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
__magic_name__ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__magic_name__ = False
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a ( self ):
'''simple docstring'''
return 100
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = {
'in_channels': 9,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ )
return model
@property
def a ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.dummy_unet
_lowerCAmelCase : List[Any] = self.dummy_movq
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , )
_lowerCAmelCase : Any = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a ( self , snake_case__ , snake_case__=0 ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
_lowerCAmelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create init_image
_lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
_lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((256, 256) )
# create mask
_lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.floataa )
_lowerCAmelCase : Dict = 0
if str(snake_case__ ).startswith('mps' ):
_lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ )
else:
_lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_lowerCAmelCase : Optional[int] = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = 'cpu'
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) )
_lowerCAmelCase : int = output.images
_lowerCAmelCase : int = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : List[str] = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def a ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
_lowerCAmelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa )
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[str] = 'a hat'
_lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
_lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowerCAmelCase : Optional[Any] = pipeline(
image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
_lowerCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 25 | 0 |
'''simple docstring'''
import requests
lowerCAmelCase : List[str] = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Dict = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(f'{i}.) {article["title"]}' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
| 367 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def lowercase (_A ):
"""simple docstring"""
if not postfix_notation:
return 0
_lowerCAmelCase : int = {'+', '-', '*', '/'}
_lowerCAmelCase : list[Any] = []
for token in postfix_notation:
if token in operations:
_lowerCAmelCase , _lowerCAmelCase : Tuple = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
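                # Python's // floors toward negative infinity; when the signs
                # differ, add 1 so the result truncates toward zero instead.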
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(_A ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase : str = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowerCAmelCase : Tuple = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
lowerCAmelCase : Dict = {"""facebook/blenderbot-3B""": 1_28}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
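    # Map every byte outside the printable ranges above to an unused code
    # point (shifted past 255) so no token byte decodes to whitespace or a
    # control character.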
_lowerCAmelCase : Union[str, Any] = bs[:]
_lowerCAmelCase : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_A )
cs.append(2**8 + n )
n += 1
_lowerCAmelCase : str = [chr(_A ) for n in cs]
return dict(zip(_A , _A ) )
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = set()
_lowerCAmelCase : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Union[str, Any] = char
return pairs
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ["input_ids", "attention_mask"]
def __init__( self , snake_case__ , snake_case__ , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Any = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else bos_token
_lowerCAmelCase : List[str] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else eos_token
_lowerCAmelCase : int = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else sep_token
_lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else cls_token
_lowerCAmelCase : Any = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else unk_token
_lowerCAmelCase : Any = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
_lowerCAmelCase : Tuple = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
super().__init__(
errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , **snake_case__ , )
with open(snake_case__ , encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : str = json.load(snake_case__ )
_lowerCAmelCase : List[Any] = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Dict = errors # how to handle errors in decoding
_lowerCAmelCase : int = bytes_to_unicode()
_lowerCAmelCase : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(snake_case__ , encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : str = merges_handle.read().split('\n' )[1:-1]
_lowerCAmelCase : str = [tuple(merge.split() ) for merge in bpe_merges]
_lowerCAmelCase : Tuple = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCAmelCase : Optional[int] = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def a ( self ):
'''simple docstring'''
return len(self.encoder )
def a ( self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def a ( self , snake_case__ ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : Optional[Any] = tuple(snake_case__ )
_lowerCAmelCase : Tuple = get_pairs(snake_case__ )
if not pairs:
return token
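        # Greedy BPE: repeatedly merge the adjacent pair with the lowest merge
        # rank until no learned merge applies to the word.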
while True:
_lowerCAmelCase : List[str] = min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase : Optional[Any] = bigram
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : str = 0
while i < len(snake_case__ ):
try:
_lowerCAmelCase : List[Any] = word.index(snake_case__ , snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : str = j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : List[str] = tuple(snake_case__ )
_lowerCAmelCase : int = new_word
if len(snake_case__ ) == 1:
break
else:
_lowerCAmelCase : Tuple = get_pairs(snake_case__ )
_lowerCAmelCase : Union[str, Any] = ' '.join(snake_case__ )
_lowerCAmelCase : Union[str, Any] = word
return word
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : int = []
for token in re.findall(self.pat , snake_case__ ):
_lowerCAmelCase : Optional[int] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case__ ).split(' ' ) )
return bpe_tokens
def a ( self , snake_case__ ):
'''simple docstring'''
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def a ( self , snake_case__ ):
'''simple docstring'''
return self.decoder.get(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = ''.join(snake_case__ )
_lowerCAmelCase : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCAmelCase : Any = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + '\n' )
_lowerCAmelCase : int = 0
with open(snake_case__ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case__ : snake_case__[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Tuple = token_index
writer.write(' '.join(snake_case__ ) + '\n' )
index += 1
return vocab_file, merge_file
def a ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : List[str] = [self.sep_token_id]
_lowerCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a ( self , snake_case__ , snake_case__=False , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(snake_case__ ) > 0 and not text[0].isspace()):
_lowerCAmelCase : Tuple = ' ' + text
return (text, kwargs)
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Any = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to prefix the text with a space, as is done within Blenderbot
inputs.append(' ' + text )
else:
# Generated responses should already contain the leading space.
inputs.append(snake_case__ )
_lowerCAmelCase : List[Any] = ' '.join(snake_case__ )
_lowerCAmelCase : str = self.encode(snake_case__ )
if len(snake_case__ ) > self.model_max_length:
_lowerCAmelCase : Any = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
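# A minimal standalone sketch of the pre-tokenization split performed by the
# `re.findall(self.pat , ...)` loop above. The `\p{L}`/`\p{N}` classes assume
# the third-party `regex` package (the real tokenizer imports it as `re`):
import regex

_gpt2_pat = regex.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
print(_gpt2_pat.findall("Hello world, isn't this nice?"))
# expected: ['Hello', ' world', ',', ' isn', "'t", ' this', ' nice', '?']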
| 368 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "mobilenet_v2"
def __init__( self , snake_case__=3 , snake_case__=224 , snake_case__=1.0 , snake_case__=8 , snake_case__=8 , snake_case__=6 , snake_case__=32 , snake_case__=True , snake_case__=True , snake_case__="relu6" , snake_case__=True , snake_case__=0.8 , snake_case__=0.02 , snake_case__=0.001 , snake_case__=255 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Union[str, Any] = image_size
_lowerCAmelCase : List[Any] = depth_multiplier
_lowerCAmelCase : List[Any] = depth_divisible_by
_lowerCAmelCase : Optional[Any] = min_depth
_lowerCAmelCase : str = expand_ratio
_lowerCAmelCase : str = output_stride
_lowerCAmelCase : Any = first_layer_is_expansion
_lowerCAmelCase : int = finegrained_output
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : List[str] = tf_padding
_lowerCAmelCase : Optional[int] = classifier_dropout_prob
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : str = semantic_loss_ignore_index
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = version.parse("1.11" )
@property
def a ( self ):
'''simple docstring'''
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def a ( self ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def a ( self ):
'''simple docstring'''
return 1E-4
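# A minimal usage sketch, assuming the real `transformers` library: the class
# above mirrors `MobileNetV2Config` (see the pretrained map), so the same
# arguments apply:
from transformers import MobileNetV2Config

_config = MobileNetV2Config(depth_multiplier=1.0, image_size=224)
assert _config.depth_multiplier > 0  # non-positive multipliers raise ValueError in __init__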
| 25 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase : Tuple = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase : Any = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase : Tuple = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = 2_0_0
__magic_name__ = {"Content-Length": "100"}
__magic_name__ = {}
def a ( self , **snake_case__ ):
'''simple docstring'''
return [bytes(snake_case__ , 'utf-8' )]
def lowercase (*_A , **_A ):
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def lowercase (_A , _A , _A ):
"""simple docstring"""
import requests
monkeypatch.setattr(_A , 'request' , _A )
_lowerCAmelCase : Optional[Any] = URL
if issubclass(_A , _A ):
_lowerCAmelCase : List[str] = url
elif issubclass(_A , _A ):
_lowerCAmelCase : str = [url]
elif issubclass(_A , _A ):
_lowerCAmelCase : Tuple = {'train': url}
_lowerCAmelCase : Tuple = 'dummy'
_lowerCAmelCase : Optional[int] = 'downloads'
_lowerCAmelCase : int = tmp_path
_lowerCAmelCase : Dict = DownloadConfig(
cache_dir=os.path.join(_A , _A ) , use_etag=_A , )
_lowerCAmelCase : str = DownloadManager(dataset_name=_A , download_config=_A )
_lowerCAmelCase : int = dl_manager.download(_A )
_lowerCAmelCase : Optional[int] = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_A , _A ):
_lowerCAmelCase : Tuple = [downloaded_paths]
_lowerCAmelCase : Optional[Any] = [urls]
elif isinstance(_A , _A ):
assert "train" in downloaded_paths.keys()
_lowerCAmelCase : int = downloaded_paths.values()
_lowerCAmelCase : Dict = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_A , _A ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_lowerCAmelCase : Union[str, Any] = Path(_A )
_lowerCAmelCase : Optional[int] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_lowerCAmelCase : Optional[Any] = downloaded_path.read_text()
assert content == CONTENT
_lowerCAmelCase : int = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
_lowerCAmelCase : List[str] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def lowercase (_A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = str(_A )
if issubclass(_A , _A ):
_lowerCAmelCase : Any = filename
elif issubclass(_A , _A ):
_lowerCAmelCase : int = [filename]
elif issubclass(_A , _A ):
_lowerCAmelCase : int = {'train': filename}
_lowerCAmelCase : Tuple = 'dummy'
_lowerCAmelCase : int = xz_file.parent
_lowerCAmelCase : List[str] = 'extracted'
_lowerCAmelCase : List[str] = DownloadConfig(
cache_dir=_A , use_etag=_A , )
_lowerCAmelCase : Optional[Any] = DownloadManager(dataset_name=_A , download_config=_A )
_lowerCAmelCase : Union[str, Any] = dl_manager.extract(_A )
_lowerCAmelCase : List[str] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_A , _A ):
_lowerCAmelCase : Tuple = [extracted_paths]
_lowerCAmelCase : Optional[int] = [paths]
elif isinstance(_A , _A ):
assert "train" in extracted_paths.keys()
_lowerCAmelCase : str = extracted_paths.values()
_lowerCAmelCase : List[str] = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_A , _A ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_lowerCAmelCase : Any = Path(_A )
_lowerCAmelCase : Any = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_A , etag=_A )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_lowerCAmelCase : Any = extracted_path.read_text()
_lowerCAmelCase : Union[str, Any] = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowercase (_A , _A ):
"""simple docstring"""
assert path.endswith('.jsonl' )
for num_items, line in enumerate(_A , start=1 ):
_lowerCAmelCase : Any = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : List[str] = request.getfixturevalue(_A )
_lowerCAmelCase : Union[str, Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_A ) , start=1 ):
_test_jsonl(_A , _A )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = request.getfixturevalue(_A )
_lowerCAmelCase : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_A ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_A ) , start=1 ):
_test_jsonl(_A , _A )
assert num_tar == 1
assert num_jsonl == 2
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(_A ) , start=1 ):
assert os.path.basename(_A ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
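# A minimal sketch of where the asserted cache filename comes from, assuming
# the real `datasets` library: `hash_url_to_filename` is a deterministic hash
# of the URL (plus an optional etag, None here since `use_etag` is disabled):
from datasets.utils.file_utils import hash_url_to_filename

print(hash_url_to_filename("http://www.mocksite.com/file1.txt", etag=None))
# expected to equal the HASH constant defined at the top of this snippet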
| 369 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase : Optional[int] = 'pt'
_lowerCAmelCase : Tuple = 'tf'
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case__ )
model_tf.save_pretrained(snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = 'mock_framework'
# Framework provided - return whatever the user provides
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(snake_case__ )
_lowerCAmelCase : Dict = FeaturesManager.determine_framework(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(snake_case__ )
_lowerCAmelCase : int = FeaturesManager.determine_framework(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
def a ( self ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(snake_case__ )
_lowerCAmelCase : Tuple = FeaturesManager.determine_framework(snake_case__ )
self.assertEqual(snake_case__ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(snake_case__ )
_lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(snake_case__ )
self.assertEqual(snake_case__ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(snake_case__ ):
_lowerCAmelCase : str = FeaturesManager.determine_framework(snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_tf_available' , snake_case__ ):
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case__ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase : Any = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_torch_available' , snake_case__ ):
_lowerCAmelCase : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case__ , self.framework_tf )
# Both in environment -> use PyTorch
_lowerCAmelCase : int = MagicMock(return_value=snake_case__ )
_lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch(
'transformers.onnx.features.is_torch_available' , snake_case__ ):
_lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case__ , self.framework_pt )
# Both not in environment -> raise error
_lowerCAmelCase : str = MagicMock(return_value=snake_case__ )
_lowerCAmelCase : Optional[Any] = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch(
'transformers.onnx.features.is_torch_available' , snake_case__ ):
with self.assertRaises(snake_case__ ):
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
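# A minimal sketch of the resolution order these tests pin down, assuming the
# real `transformers` library: an explicit framework argument wins, then the
# checkpoint format on disk, then whichever backend is importable (PyTorch
# before TensorFlow):
from transformers.onnx import FeaturesManager

print(FeaturesManager.determine_framework("hf-internal-testing/tiny-random-bert"))
# expected: 'pt' in an environment where torch is installed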
| 25 | 0 |
'''simple docstring'''
lowerCAmelCase : Dict = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 370 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Any = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = NllbTokenizer
__magic_name__ = []
__magic_name__ = []
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
_lowerCAmelCase : Dict = legacy_behaviour
super().__init__(
vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : int = False if not self.vocab_file else True
_lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_lowerCAmelCase : Any = {
lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn'
_lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang )
_lowerCAmelCase : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowerCAmelCase : Optional[Any] = src_lang
_lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
_lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ )
_lowerCAmelCase : Optional[Any] = tgt_lang_id
return inputs
def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = src_lang
_lowerCAmelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ )
def a ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def a ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : Dict = []
_lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : int = [self.eos_token_id]
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : int = []
_lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : List[str] = [self.eos_token_id]
_lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_lowerCAmelCase : Union[str, Any] = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
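# A minimal usage sketch, assuming the real `NllbTokenizerFast` (checkpoint
# name taken from the pretrained map above):
from transformers import NllbTokenizerFast

_tok = NllbTokenizerFast.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
_batch = _tok("Hello world", return_tensors="pt")
# with the default (non-legacy) behaviour, input_ids should start with the
# eng_Latn language-code token and end with </s>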
| 25 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : int = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
lowerCAmelCase : List[str] = 5
lowerCAmelCase : Dict = 10
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = SpeechaTextTokenizer
__magic_name__ = False
__magic_name__ = True
def a ( self ):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : Optional[int] = sp.SentencePieceProcessor()
spm_model.Load(snake_case__ )
_lowerCAmelCase : Optional[Any] = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(snake_case__ ) )]
_lowerCAmelCase : Optional[int] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
_lowerCAmelCase : Tuple = Path(self.tmpdirname )
save_json(snake_case__ , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(snake_case__ , save_dir / VOCAB_FILES_NAMES['spm_file'] )
_lowerCAmelCase : Optional[int] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = '<pad>'
_lowerCAmelCase : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(snake_case__ ) , 1001 )
def a ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
_lowerCAmelCase : Optional[Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [289, 50, 14, 174, 386] , )
_lowerCAmelCase : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(snake_case__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
_lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = {'input_ids': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ = "valhalla/s2t_mustc_multilinguial_medium"
__magic_name__ = "C'est trop cool"
__magic_name__ = "Esto es genial"
@classmethod
def a ( cls ):
'''simple docstring'''
_lowerCAmelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def a ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 )
def a ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.vocab_size , 1_0000 )
def a ( self ):
'''simple docstring'''
self.assertIn(snake_case__ , self.tokenizer.all_special_ids )
_lowerCAmelCase : Optional[int] = [ES_CODE, 4, 1601, 47, 7647, 2]
_lowerCAmelCase : Any = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
_lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertNotIn(self.tokenizer.eos_token , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = 'fr'
_lowerCAmelCase : Union[str, Any] = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , snake_case__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
_lowerCAmelCase : str = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
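# A minimal sketch of the SentencePiece convention the expected tokens above
# rely on: SPIECE_UNDERLINE ('▁') marks the start of a word, so detokenizing
# is concatenate-and-replace:
_tokens = ["▁This", "▁is", "▁a", "▁t", "est"]
print("".join(_tokens).replace("▁", " ").strip())  # 'This is a test'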
| 371 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCAmelCase : List[str] = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def lowercase (_A ):
"""simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
lowerCAmelCase : Dict = parser.parse_args()
if args.check_lib:
lowerCAmelCase : Union[str, Any] = importlib.import_module("""transformers""")
lowerCAmelCase : int = Path(transformers_module.__file__).parent
else:
lowerCAmelCase : int = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
| 25 | 0 |
'''simple docstring'''
import math
import sys
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = ''
try:
with open(_A , 'rb' ) as binary_file:
_lowerCAmelCase : Dict = binary_file.read()
for dat in data:
_lowerCAmelCase : Any = f'{dat:08b}'
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : List[str] = {'0': '0', '1': '1'}
_lowerCAmelCase : Optional[int] = '', ''
_lowerCAmelCase : Optional[Any] = len(_A )
for i in range(len(_A ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
_lowerCAmelCase : Any = lexicon[curr_string]
result += last_match_id
_lowerCAmelCase : Dict = last_match_id + '0'
if math.loga(_A ).is_integer():
_lowerCAmelCase : str = {}
for curr_key in list(_A ):
_lowerCAmelCase : Any = lexicon.pop(_A )
_lowerCAmelCase : Tuple = new_lex
_lowerCAmelCase : List[str] = last_match_id + '1'
index += 1
_lowerCAmelCase : Dict = ''
return result
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = 8
try:
with open(_A , 'wb' ) as opened_file:
_lowerCAmelCase : Tuple = [
to_write[i : i + byte_length]
for i in range(0 , len(_A ) , _A )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_A , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
_lowerCAmelCase : Optional[Any] = data_bits[counter:]
_lowerCAmelCase : str = data_bits[counter + 1 :]
return data_bits
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : List[str] = read_file_binary(_A )
_lowerCAmelCase : List[str] = remove_prefix(_A )
_lowerCAmelCase : str = decompress_data(_A )
write_file_binary(_A , _A )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
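# A minimal sketch of the byte-to-bit-string step performed by
# `read_file_binary` above: each byte becomes a zero-padded 8-bit field.
_data = b"AB"
print("".join(f"{_byte:08b}" for _byte in _data))  # '0100000101000010'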
| 350 |
'''simple docstring'''
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = 0
# if input_string is "aba" then new_input_string becomes "a|b|a"
_lowerCAmelCase : List[str] = ''
_lowerCAmelCase : Any = ''
# append each character + "|" to new_input_string for range(0, length - 1)
for i in input_string[: len(_A ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the start and end of the previously found furthest-ending
# palindromic substring
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = 0, 0
# length[i] shows the length of palindromic substring with center i
_lowerCAmelCase : List[str] = [1 for i in range(len(new_input_string ) )]
# for each character in new_string find corresponding palindromic string
_lowerCAmelCase : Any = 0
for j in range(len(new_input_string ) ):
_lowerCAmelCase : Optional[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(new_input_string )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_lowerCAmelCase : List[str] = 2 * k - 1
# does this palindrome end after the previously explored end (that is, r)?
# if yes, update r to the last index of this palindrome
if j + k - 1 > r:
_lowerCAmelCase : Optional[Any] = j - k + 1 # noqa: E741
_lowerCAmelCase : int = j + k - 1
# update max_length and start position
if max_length < length[j]:
_lowerCAmelCase : Dict = length[j]
_lowerCAmelCase : Optional[int] = j
# create that string
_lowerCAmelCase : List[str] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
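# A quick worked example for the Manacher implementation above: the '|'
# separators let even- and odd-length palindromes share one centre array.
print(lowercase("abacab"))  # expected: 'bacab'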
| 25 | 0 |
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
lowerCAmelCase : List[str] = """bert-base-cased"""
lowerCAmelCase : Dict = """fp16"""
lowerCAmelCase : Any = """bf16"""
lowerCAmelCase : Any = [FPaa, BFaa]
@require_fsdp
@require_cuda
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : List[str] = dict(
ACCELERATE_USE_FSDP='true' , MASTER_ADDR='localhost' , MASTER_PORT='10999' , RANK='0' , LOCAL_RANK='0' , WORLD_SIZE='1' , )
def a ( self ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(snake_case__ ):
_lowerCAmelCase : List[str] = self.dist_env.copy()
_lowerCAmelCase : Optional[Any] = F'{i + 1}'
_lowerCAmelCase : Optional[Any] = strategy
with mockenv_context(**snake_case__ ):
_lowerCAmelCase : Union[str, Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def a ( self ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(snake_case__ ):
_lowerCAmelCase : str = self.dist_env.copy()
_lowerCAmelCase : str = prefetch_policy
with mockenv_context(**snake_case__ ):
_lowerCAmelCase : List[Any] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def a ( self ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(snake_case__ ):
_lowerCAmelCase : List[str] = self.dist_env.copy()
_lowerCAmelCase : str = state_dict_type
with mockenv_context(**snake_case__ ):
_lowerCAmelCase : str = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = AutoModel.from_pretrained(snake_case__ )
for policy in FSDP_AUTO_WRAP_POLICY:
_lowerCAmelCase : List[str] = self.dist_env.copy()
_lowerCAmelCase : int = policy
if policy == "TRANSFORMER_BASED_WRAP":
_lowerCAmelCase : str = 'BertLayer'
elif policy == "SIZE_BASED_WRAP":
_lowerCAmelCase : List[Any] = '2000'
with mockenv_context(**snake_case__ ):
_lowerCAmelCase : Optional[Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(snake_case__ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
_lowerCAmelCase : Any = self.dist_env.copy()
_lowerCAmelCase : str = 'TRANSFORMER_BASED_WRAP'
_lowerCAmelCase : Optional[Any] = 'T5Layer'
with mockenv_context(**snake_case__ ):
_lowerCAmelCase : int = FullyShardedDataParallelPlugin()
with self.assertRaises(snake_case__ ) as cm:
fsdp_plugin.set_auto_wrap_policy(snake_case__ )
self.assertTrue('Could not find the transformer layer class to wrap in the model.' in str(cm.exception ) )
_lowerCAmelCase : List[Any] = self.dist_env.copy()
_lowerCAmelCase : Optional[int] = 'SIZE_BASED_WRAP'
_lowerCAmelCase : List[str] = '0'
with mockenv_context(**snake_case__ ):
_lowerCAmelCase : Tuple = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(snake_case__ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def a ( self ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
_lowerCAmelCase : Dict = self.dist_env.copy()
_lowerCAmelCase : int = mp_dtype
with mockenv_context(**snake_case__ ):
_lowerCAmelCase : List[str] = Accelerator()
if mp_dtype == "fp16":
_lowerCAmelCase : Any = torch.floataa
elif mp_dtype == "bf16":
_lowerCAmelCase : List[Any] = torch.bfloataa
_lowerCAmelCase : Tuple = MixedPrecision(param_dtype=snake_case__ , reduce_dtype=snake_case__ , buffer_dtype=snake_case__ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , snake_case__ )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , snake_case__ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(snake_case__ )
def a ( self ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
_lowerCAmelCase : List[Any] = self.dist_env.copy()
_lowerCAmelCase : Dict = str(snake_case__ ).lower()
with mockenv_context(**snake_case__ ):
_lowerCAmelCase : List[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=snake_case__ ) )
@require_fsdp
@require_multi_gpu
@slow
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : List[str] = 0.82
_lowerCAmelCase : int = [
'fsdp_shard_grad_op_transformer_based_wrap',
'fsdp_full_shard_transformer_based_wrap',
]
_lowerCAmelCase : Any = {
'multi_gpu_fp16': 3200,
'fsdp_shard_grad_op_transformer_based_wrap_fp16': 2000,
'fsdp_full_shard_transformer_based_wrap_fp16': 1900,
# Disabling the test below, as it overwhelms RAM usage on the
# CI self-hosted runner, leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
_lowerCAmelCase : str = 160
_lowerCAmelCase : Any = 160
_lowerCAmelCase : Optional[Any] = inspect.getfile(accelerate.test_utils )
_lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps'] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = os.path.join(self.test_scripts_folder , 'test_performance.py' )
_lowerCAmelCase : Dict = ['accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp']
for config in self.performance_configs:
_lowerCAmelCase : List[Any] = cmd.copy()
for i, strategy in enumerate(snake_case__ ):
if strategy.lower() in config:
cmd_config.append(F'--fsdp_sharding_strategy={i+1}' )
break
if "fp32" in config:
cmd_config.append('--mixed_precision=no' )
else:
cmd_config.append('--mixed_precision=fp16' )
if "cpu_offload" in config:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'--fsdp_auto_wrap_policy={policy}' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
F'--performance_lower_bound={self.performance_lower_bound}',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case__ , env=os.environ.copy() )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = os.path.join(self.test_scripts_folder , 'test_checkpointing.py' )
_lowerCAmelCase : Any = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
'--use_fsdp',
'--mixed_precision=fp16',
'--fsdp_transformer_layer_cls_to_wrap=BertLayer',
]
for i, strategy in enumerate(snake_case__ ):
_lowerCAmelCase : List[str] = cmd.copy()
cmd_config.append(F'--fsdp_sharding_strategy={i+1}' )
if strategy != "FULL_SHARD":
continue
_lowerCAmelCase : Any = len(snake_case__ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
_lowerCAmelCase : List[Any] = cmd_config[:state_dict_config_index]
cmd_config.append(F'--fsdp_state_dict_type={state_dict_type}' )
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
'--partial_train_epoch=1',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case__ , env=os.environ.copy() )
_lowerCAmelCase : Any = cmd_config[:-1]
_lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdir , 'epoch_0' )
cmd_config.extend(
[
F'--resume_from_checkpoint={resume_from_checkpoint}',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case__ , env=os.environ.copy() )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = os.path.join(self.test_scripts_folder , 'test_peak_memory_usage.py' )
_lowerCAmelCase : Dict = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
_lowerCAmelCase : str = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['--mixed_precision=fp16'] )
else:
cmd_config.extend(['--mixed_precision=no'] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['--use_fsdp'] )
for i, strategy in enumerate(snake_case__ ):
if strategy.lower() in spec:
cmd_config.append(F'--fsdp_sharding_strategy={i+1}' )
break
if "cpu_offload" in spec:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'--fsdp_auto_wrap_policy={policy}' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
F'--peak_memory_upper_bound={peak_mem_upper_bound}',
F'--n_train={self.n_train}',
F'--n_val={self.n_val}',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case__ , env=os.environ.copy() )
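# A minimal sketch of the enum alignment the `ShardingStrategy(i + 1)`
# assertions above depend on: torch's enum is 1-indexed in the same order as
# accelerate's FSDP_SHARDING_STRATEGY list.
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

print(ShardingStrategy(1))  # ShardingStrategy.FULL_SHARD
print(ShardingStrategy(2))  # ShardingStrategy.SHARD_GRAD_OP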
| 351 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = 0
__magic_name__ = False
__magic_name__ = 3.0
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_lowerCAmelCase : str = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , snake_case__ )
@require_multi_gpu
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case__ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler])
lowerCAmelCase : Optional[Any] = torch.nn.Linear(1_00, 2_00)
lowerCAmelCase : List[str] = accelerator.prepare(model)
# Check the values changed in kwargs
lowerCAmelCase : List[Any] = """"""
lowerCAmelCase : Tuple = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
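# A minimal sketch of the `to_kwargs` contract the asserts above pin down,
# assuming the real `accelerate` library: only fields that differ from their
# dataclass defaults are emitted.
from dataclasses import dataclass

from accelerate.utils import KwargsHandler


@dataclass
class _Mock(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


print(_Mock(a=2).to_kwargs())  # {'a': 2}
print(_Mock().to_kwargs())  # {}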
| 25 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Any = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "wavlm"
def __init__( self , snake_case__=32 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=1E-5 , snake_case__="group" , snake_case__="gelu" , snake_case__=(512, 512, 512, 512, 512, 512, 512) , snake_case__=(5, 2, 2, 2, 2, 2, 2) , snake_case__=(10, 3, 3, 3, 3, 2, 2) , snake_case__=False , snake_case__=128 , snake_case__=16 , snake_case__=320 , snake_case__=800 , snake_case__=False , snake_case__=True , snake_case__=0.05 , snake_case__=10 , snake_case__=2 , snake_case__=0.0 , snake_case__=10 , snake_case__=320 , snake_case__=2 , snake_case__=0.1 , snake_case__=100 , snake_case__=256 , snake_case__=256 , snake_case__=0.1 , snake_case__="mean" , snake_case__=False , snake_case__=False , snake_case__=256 , snake_case__=(512, 512, 512, 512, 1500) , snake_case__=(5, 3, 3, 1, 1) , snake_case__=(1, 2, 3, 1, 1) , snake_case__=512 , snake_case__=80 , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=False , snake_case__=3 , snake_case__=2 , snake_case__=3 , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ )
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : List[Any] = feat_extract_norm
_lowerCAmelCase : List[Any] = feat_extract_activation
_lowerCAmelCase : Dict = list(snake_case__ )
_lowerCAmelCase : List[Any] = list(snake_case__ )
_lowerCAmelCase : Tuple = list(snake_case__ )
_lowerCAmelCase : Any = conv_bias
_lowerCAmelCase : Optional[int] = num_buckets
_lowerCAmelCase : Optional[int] = max_bucket_distance
_lowerCAmelCase : int = num_conv_pos_embeddings
_lowerCAmelCase : Optional[int] = num_conv_pos_embedding_groups
_lowerCAmelCase : str = len(self.conv_dim )
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : int = hidden_dropout
_lowerCAmelCase : Any = attention_dropout
_lowerCAmelCase : List[str] = activation_dropout
_lowerCAmelCase : Any = feat_proj_dropout
_lowerCAmelCase : Dict = final_dropout
_lowerCAmelCase : List[Any] = layerdrop
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : Tuple = num_ctc_classes
_lowerCAmelCase : List[str] = vocab_size
_lowerCAmelCase : List[Any] = do_stable_layer_norm
_lowerCAmelCase : Optional[Any] = use_weighted_layer_sum
_lowerCAmelCase : List[str] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : str = apply_spec_augment
_lowerCAmelCase : Any = mask_time_prob
_lowerCAmelCase : Optional[int] = mask_time_length
_lowerCAmelCase : Union[str, Any] = mask_time_min_masks
_lowerCAmelCase : Tuple = mask_feature_prob
_lowerCAmelCase : List[str] = mask_feature_length
# parameters for pretraining with codevector quantized representations
_lowerCAmelCase : Any = num_codevectors_per_group
_lowerCAmelCase : str = num_codevector_groups
_lowerCAmelCase : Dict = contrastive_logits_temperature
_lowerCAmelCase : List[str] = num_negatives
_lowerCAmelCase : Optional[int] = codevector_dim
_lowerCAmelCase : int = proj_codevector_dim
_lowerCAmelCase : Dict = diversity_loss_weight
# ctc loss
_lowerCAmelCase : Tuple = ctc_loss_reduction
_lowerCAmelCase : Optional[int] = ctc_zero_infinity
# adapter
_lowerCAmelCase : Tuple = add_adapter
_lowerCAmelCase : Optional[Any] = adapter_kernel_size
_lowerCAmelCase : Union[str, Any] = adapter_stride
_lowerCAmelCase : Any = num_adapter_layers
_lowerCAmelCase : List[str] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCAmelCase : List[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCAmelCase : Tuple = list(snake_case__ )
_lowerCAmelCase : List[Any] = list(snake_case__ )
_lowerCAmelCase : List[str] = list(snake_case__ )
_lowerCAmelCase : str = xvector_output_dim
@property
def a ( self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
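# For the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the property above returns
# 5 * 2 ** 6 = 320: the feature encoder emits one frame per 320 input samples,
# i.e. a 20 ms hop at the 16 kHz sampling rate this model family expects.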
| 352 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "trajectory_transformer"
__magic_name__ = ["past_key_values"]
__magic_name__ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , snake_case__=100 , snake_case__=5 , snake_case__=1 , snake_case__=1 , snake_case__=249 , snake_case__=6 , snake_case__=17 , snake_case__=25 , snake_case__=4 , snake_case__=4 , snake_case__=128 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0006 , snake_case__=512 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=True , snake_case__=1 , snake_case__=5_0256 , snake_case__=5_0256 , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Any = action_weight
_lowerCAmelCase : Optional[int] = reward_weight
_lowerCAmelCase : Union[str, Any] = value_weight
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Tuple = block_size
_lowerCAmelCase : List[Any] = action_dim
_lowerCAmelCase : List[Any] = observation_dim
_lowerCAmelCase : Union[str, Any] = transition_dim
_lowerCAmelCase : Tuple = learning_rate
_lowerCAmelCase : int = n_layer
_lowerCAmelCase : Any = n_head
_lowerCAmelCase : Tuple = n_embd
_lowerCAmelCase : Optional[Any] = embd_pdrop
_lowerCAmelCase : Union[str, Any] = attn_pdrop
_lowerCAmelCase : Any = resid_pdrop
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : List[Any] = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = kaiming_initializer_range
_lowerCAmelCase : List[Any] = use_cache
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
| 25 | 0 |
'''simple docstring'''
def lowercase (_A , _A ):
"""simple docstring"""
return int((input_a, input_a).count(0 ) != 0 )
def lowercase ():
"""simple docstring"""
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
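# NAND is functionally complete, so the other basic gates can be derived from it.
# A short sketch (not in the original file) built only on the `nand_gate` helper
# defined above, following the same 0/1 int convention:
def not_gate(input_a):
    return nand_gate(input_a, input_a)
def and_gate(input_a, input_b):
    return not_gate(nand_gate(input_a, input_b))
def or_gate(input_a, input_b):
    return nand_gate(not_gate(input_a), not_gate(input_b))
if __name__ == "__main__":
    assert [not_gate(0), not_gate(1)] == [1, 0]
    assert [and_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]
    assert [or_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 1, 1, 1]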
| 353 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCAmelCase : Union[str, Any] = 25_00_04
lowerCAmelCase : int = 25_00_20
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = MBartaaTokenizer
__magic_name__ = MBartaaTokenizerFast
__magic_name__ = True
__magic_name__ = True
def a ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : List[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = '<s>'
_lowerCAmelCase : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(snake_case__ ) , 1054 )
def a ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ )
_lowerCAmelCase : Any = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def a ( self ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ )
# Checks it saves the same files, plus the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
_lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=True
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ )
# Checks it saves the same files
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=False
_lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
_lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ = "facebook/mbart-large-50-one-to-many-mmt"
__magic_name__ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
__magic_name__ = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
__magic_name__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
@classmethod
def a ( cls ):
'''simple docstring'''
_lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
_lowerCAmelCase : Dict = 1
return cls
def a ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertIn(snake_case__ , self.tokenizer.all_special_ids )
_lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
_lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
_lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertNotIn(self.tokenizer.eos_token , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , snake_case__ )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0]
self.assertEqual(ids[0] , snake_case__ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(snake_case__ )
_lowerCAmelCase : Tuple = MBartaaTokenizer.from_pretrained(snake_case__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' )
_lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
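# Spelled out, the two decoder-side assertions above check MBart's convention:
# `shift_tokens_right` rotates the labels right by one, so a target sequence
# [RO_CODE, ..., eos] becomes decoder input [eos, RO_CODE, ...]: the final
# non-pad token wraps to the front instead of a fixed start token being prepended.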
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
_lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' )
_lowerCAmelCase : str = self.tokenizer(
text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' )
_lowerCAmelCase : List[Any] = targets['input_ids']
_lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(snake_case__ ) , {
# en_XX, A, test, EOS
'input_ids': [[25_0004, 62, 3034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_0001,
} , )
| 25 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class UpperCamelCase__ :
"""simple docstring"""
def a ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
return None
class UpperCamelCase__ :
"""simple docstring"""
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
return None
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a ( self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(snake_case__ , 'tf' , 12 , **snake_case__ )
@require_torch
@slow
def a ( self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(snake_case__ , 'pt' , 12 , **snake_case__ )
@require_torch
@slow
def a ( self ):
'''simple docstring'''
from transformers import BertModel
_lowerCAmelCase : List[Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(snake_case__ ) )
vocab_file.flush()
_lowerCAmelCase : List[Any] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
_lowerCAmelCase : List[Any] = BertModel(BertConfig(vocab_size=len(snake_case__ ) ) )
model.save_pretrained(snake_case__ )
self._test_export(snake_case__ , 'pt' , 12 , snake_case__ )
@require_tf
@slow
def a ( self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_lowerCAmelCase : int = self._test_export(snake_case__ , 'tf' , 12 , **snake_case__ )
_lowerCAmelCase : List[Any] = quantize(Path(snake_case__ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(snake_case__ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a ( self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_lowerCAmelCase : Dict = self._test_export(snake_case__ , 'pt' , 12 , **snake_case__ )
_lowerCAmelCase : Union[str, Any] = quantize(snake_case__ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(snake_case__ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , **snake_case__ ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
_lowerCAmelCase : int = Path(snake_case__ ).joinpath('model.onnx' )
# Remove the folder if it exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ )
return path
except Exception as e:
self.fail(snake_case__ )
@require_torch
@require_tokenizers
@slow
def a ( self ):
'''simple docstring'''
from transformers import BertModel
_lowerCAmelCase : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
_lowerCAmelCase : Optional[int] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(snake_case__ , snake_case__ , 'pt' )
@require_tf
@require_tokenizers
@slow
def a ( self ):
'''simple docstring'''
from transformers import TFBertModel
_lowerCAmelCase : List[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
_lowerCAmelCase : List[str] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(snake_case__ , snake_case__ , 'tf' )
def a ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = FeatureExtractionPipeline(snake_case__ , snake_case__ )
_lowerCAmelCase : str = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
_lowerCAmelCase : Optional[int] = infer_shapes(snake_case__ , snake_case__ )
# Assert all variables are present
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , snake_case__ )
self.assertSequenceEqual(variable_names[3:] , snake_case__ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ['input_ids', 'attention_mask', 'token_type_ids']
_lowerCAmelCase : Any = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
_lowerCAmelCase : Union[str, Any] = ensure_valid_input(FuncContiguousArgs() , snake_case__ , snake_case__ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(snake_case__ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(snake_case__ ) , set(snake_case__ ) )
# Parameters should be reordered according to their respective places in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(snake_case__ , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with other args (for instance the "past" parameter in GPT2)
_lowerCAmelCase : Optional[Any] = ensure_valid_input(FuncNonContiguousArgs() , snake_case__ , snake_case__ )
# Should have exactly one arg (all args before the first one not provided, "some_other_args")
self.assertEqual(len(snake_case__ ) , 1 )
self.assertEqual(len(snake_case__ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] , 'input_ids' )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
| 354 |
'''simple docstring'''
from math import isqrt
def lowercase (_A ):
"""simple docstring"""
return all(number % divisor != 0 for divisor in range(2 , isqrt(_A ) + 1 ) )
def lowercase (_A = 1_0**6 ):
"""simple docstring"""
_lowerCAmelCase : str = 0
_lowerCAmelCase : str = 1
_lowerCAmelCase : List[str] = 7
while prime_candidate < max_prime:
primes_count += is_prime(_A )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
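# Why the candidate update above works, spelled out: the candidates are the cube
# differences f(n) = (n + 1) ** 3 - n ** 3 = 3*n*n + 3*n + 1, and consecutive
# candidates differ by f(n + 1) - f(n) = 6 * (n + 1), which is exactly the
# `prime_candidate += 6 * cube_index` step (7, 19, 37, 61, ...). Note the
# primality helper returns True for 0 and 1, but the first candidate is 7, so
# that edge case is never reached here.
assert all(3 * n * n + 3 * n + 1 == (n + 1) ** 3 - n ** 3 for n in range(1, 100))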
| 25 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : List[str] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
"""kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
"""kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
"""kssteven/ibert-roberta-large-mnli""": (
"""https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
),
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "ibert"
def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=False , snake_case__="none" , **snake_case__ , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : int = num_attention_heads
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : List[Any] = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : int = attention_probs_dropout_prob
_lowerCAmelCase : str = max_position_embeddings
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : Optional[int] = initializer_range
_lowerCAmelCase : str = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = position_embedding_type
_lowerCAmelCase : Tuple = quant_mode
_lowerCAmelCase : int = force_dequant
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
@property
def a ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
_lowerCAmelCase : str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
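# The {0: 'batch', 1: 'sequence'} mappings above declare dynamic ONNX axes, so the
# exported graph accepts any batch size and sequence length. A hedged usage sketch,
# assuming the `transformers.onnx` export CLI is available:
#   python -m transformers.onnx --model=kssteven/ibert-roberta-base onnx/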
| 355 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "mvp"
__magic_name__ = ["past_key_values"]
__magic_name__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , snake_case__=5_0267 , snake_case__=1024 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=0.0 , snake_case__=0.0 , snake_case__="gelu" , snake_case__=1024 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=0.0 , snake_case__=False , snake_case__=True , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__=True , snake_case__=2 , snake_case__=2 , snake_case__=False , snake_case__=100 , snake_case__=800 , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Any = max_position_embeddings
_lowerCAmelCase : Optional[Any] = d_model
_lowerCAmelCase : Optional[int] = encoder_ffn_dim
_lowerCAmelCase : Optional[int] = encoder_layers
_lowerCAmelCase : Any = encoder_attention_heads
_lowerCAmelCase : Any = decoder_ffn_dim
_lowerCAmelCase : Optional[Any] = decoder_layers
_lowerCAmelCase : int = decoder_attention_heads
_lowerCAmelCase : Union[str, Any] = dropout
_lowerCAmelCase : List[Any] = attention_dropout
_lowerCAmelCase : List[str] = activation_dropout
_lowerCAmelCase : Optional[Any] = activation_function
_lowerCAmelCase : Any = init_std
_lowerCAmelCase : Any = encoder_layerdrop
_lowerCAmelCase : Union[str, Any] = decoder_layerdrop
_lowerCAmelCase : Optional[int] = classifier_dropout
_lowerCAmelCase : List[Any] = use_cache
_lowerCAmelCase : Optional[int] = encoder_layers
_lowerCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCAmelCase : Optional[Any] = use_prompt
_lowerCAmelCase : Optional[Any] = prompt_length
_lowerCAmelCase : Any = prompt_mid_dim
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , snake_case__ ):
_lowerCAmelCase : Any = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'The config can simply be saved and uploaded again to be fixed.' )
| 25 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase : Optional[int] = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = ["""BeitFeatureExtractor"""]
lowerCAmelCase : Optional[Any] = ["""BeitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Union[str, Any] = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
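# With the _LazyModule registered above, importing this package stays cheap: the
# torch and flax submodules are only imported on first attribute access, so e.g.
# resolving `BeitModel` through the package triggers the heavy torch import
# lazily at that point rather than at package-import time.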
| 356 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = {
"""7B""": 1_10_08,
"""13B""": 1_38_24,
"""30B""": 1_79_20,
"""65B""": 2_20_16,
"""70B""": 2_86_72,
}
lowerCAmelCase : Optional[int] = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def lowercase (_A , _A=1 , _A=2_5_6 ):
"""simple docstring"""
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
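# Worked example of the rounding above for the 7B config (inputs chosen to match
# the 11_008 "7B" entry in the size table at the top of this file):
#   n = 4096, ffn_dim_multiplier = 1, multiple_of = 256
#   int(1 * int(8 * 4096 / 3)) = 10922
#   rounded up to the next multiple of 256: (10922 + 255) // 256 * 256 = 11008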
def lowercase (_A ):
"""simple docstring"""
with open(_A , 'r' ) as f:
return json.load(_A )
def lowercase (_A , _A ):
"""simple docstring"""
with open(_A , 'w' ) as f:
json.dump(_A , _A )
def lowercase (_A , _A , _A , _A=True ):
"""simple docstring"""
os.makedirs(_A , exist_ok=_A )
_lowerCAmelCase : Optional[Any] = os.path.join(_A , 'tmp' )
os.makedirs(_A , exist_ok=_A )
_lowerCAmelCase : Any = read_json(os.path.join(_A , 'params.json' ) )
_lowerCAmelCase : List[str] = NUM_SHARDS[model_size]
_lowerCAmelCase : str = params['n_layers']
_lowerCAmelCase : Optional[int] = params['n_heads']
_lowerCAmelCase : int = n_heads // num_shards
_lowerCAmelCase : Optional[int] = params['dim']
_lowerCAmelCase : Union[str, Any] = dim // n_heads
_lowerCAmelCase : Union[str, Any] = 10_000.0
_lowerCAmelCase : str = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_lowerCAmelCase : Optional[Any] = params['n_kv_heads'] # for GQA / MQA
_lowerCAmelCase : str = n_heads_per_shard // num_key_value_heads
_lowerCAmelCase : Optional[int] = dim // num_key_value_heads
else: # compatibility with other checkpoints
_lowerCAmelCase : Union[str, Any] = n_heads
_lowerCAmelCase : Any = n_heads_per_shard
_lowerCAmelCase : Optional[Any] = dim
# permute for sliced rotary
def permute(_A , _A=n_heads , _A=dim , _A=dim ):
return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A )
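# A note on intent: the reshape above converts attention weights between two
# rotary-embedding layouts. The original checkpoints interleave the per-head
# channel pairs, while the Hugging Face Llama code rotates the first and second
# halves of each head, so viewing as (n_heads, dim1 / n_heads / 2, 2, dim2) and
# swapping the middle axes regroups interleaved rows into half-split order
# without changing any values.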
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
_lowerCAmelCase : List[Any] = [
torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' )
for i in range(_A )
]
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Union[str, Any] = {'weight_map': {}}
for layer_i in range(_A ):
_lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
_lowerCAmelCase : List[str] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) )
_lowerCAmelCase : Optional[int] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , )
_lowerCAmelCase : Dict = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A )
_lowerCAmelCase : Dict = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : Tuple = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : int = inv_freq
for k, v in state_dict.items():
_lowerCAmelCase : Optional[Any] = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
_lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : List[str] = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
_lowerCAmelCase : List[str] = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ),
}
for k, v in state_dict.items():
_lowerCAmelCase : int = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
# Write configs
_lowerCAmelCase : Tuple = {'total_size': param_count * 2}
write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) )
_lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
_lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6
_lowerCAmelCase : List[Any] = LlamaConfig(
hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , )
config.save_pretrained(_A )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
_lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(_A , safe_serialization=_A )
shutil.rmtree(_A )
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
_lowerCAmelCase : List[Any] = tokenizer_class(_A )
tokenizer.save_pretrained(_A )
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
'--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
parser.add_argument(
'--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
parser.add_argument(
'--output_dir' , help='Location to write HF model and tokenizer' , )
parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' )
_lowerCAmelCase : Any = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
_lowerCAmelCase : Dict = os.path.join(args.input_dir , 'tokenizer.model' )
write_tokenizer(args.output_dir , _A )
if __name__ == "__main__":
main()
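# Example invocation (paths hypothetical), matching the argparse flags above:
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights \
#       --model_size 7B \
#       --output_dir ./llama-7b-hf
# Passing --model_size tokenizer_only skips the weight conversion and only
# writes the tokenizer.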
| 25 | 0 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : str = filter(lambda _A : p.requires_grad , model.parameters() )
_lowerCAmelCase : Optional[Any] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
lowerCAmelCase : str = logging.getLogger(__name__)
def lowercase (_A , _A ):
"""simple docstring"""
if metric == "rouge2":
_lowerCAmelCase : Any = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
_lowerCAmelCase : List[Any] = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
_lowerCAmelCase : List[str] = '{val_avg_em:.4f}-{step_count}'
else:
raise NotImplementedError(
f'seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to this'
' function.' )
_lowerCAmelCase : Tuple = ModelCheckpoint(
dirpath=_A , filename=_A , monitor=f'val_{metric}' , mode='max' , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def lowercase (_A , _A ):
"""simple docstring"""
return EarlyStopping(
monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=_A , verbose=_A , )
class UpperCamelCase__ ( pl.Callback ):
"""simple docstring"""
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(snake_case__ )
@rank_zero_only
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=True ):
'''simple docstring'''
logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
_lowerCAmelCase : List[str] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
_lowerCAmelCase : Any = Path(pl_module.hparams.output_dir )
if type_path == "test":
_lowerCAmelCase : int = od / 'test_results.txt'
_lowerCAmelCase : str = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_lowerCAmelCase : Union[str, Any] = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
_lowerCAmelCase : List[Any] = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=snake_case__ )
generations_file.parent.mkdir(exist_ok=snake_case__ )
with open(snake_case__ , 'a+' ) as writer:
for key in sorted(snake_case__ ):
if key in ["log", "progress_bar", "preds"]:
continue
_lowerCAmelCase : Any = metrics[key]
if isinstance(snake_case__ , torch.Tensor ):
_lowerCAmelCase : List[Any] = val.item()
_lowerCAmelCase : List[Any] = F'{key}: {val:.6f}\n'
writer.write(snake_case__ )
if not save_generations:
return
if "preds" in metrics:
_lowerCAmelCase : Union[str, Any] = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(snake_case__ )
@rank_zero_only
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
try:
_lowerCAmelCase : Optional[int] = pl_module.model.model.num_parameters()
except AttributeError:
_lowerCAmelCase : Optional[int] = pl_module.model.num_parameters()
_lowerCAmelCase : int = count_trainable_parameters(snake_case__ )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(snake_case__ , snake_case__ , 'test' )
@rank_zero_only
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
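# A minimal wiring sketch (not from this file): the pl.Callback subclass above is
# combined with the checkpoint / early-stopping factories defined at module level.
# The factory names and the `module` LightningModule below are assumptions for
# illustration only.
# callbacks = [
#     Seq2SeqLoggingCallback(),
#     get_checkpoint_callback(output_dir, metric='rouge2'),
#     get_early_stopping_callback(metric='rouge2', patience=3),
# ]
# trainer = pl.Trainer(max_epochs=3, callbacks=callbacks)
# trainer.fit(module)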
| 357 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = True
__magic_name__ = None
__magic_name__ = 1
__magic_name__ = None
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
def a ( self ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(snake_case__ ) for k, v in self.__dict__.items()} )
| 25 | 0 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo.
lowerCAmelCase : Optional[Any] = """."""
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
lowerCAmelCase : Dict = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def lowercase (_A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : Any = SavedModel()
_lowerCAmelCase : Dict = []
with open(os.path.join(_A , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f:
_lowerCAmelCase : Optional[int] = json.load(_A )['opsets']
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(_A )] )
with open(_A , 'rb' ) as f:
saved_model.ParseFromString(f.read() )
_lowerCAmelCase : List[str] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
_lowerCAmelCase : str = sorted(_A )
_lowerCAmelCase : str = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(_A )
if strict and len(_A ) > 0:
raise Exception(f'Found the following incompatible ops for the opset {opset}:\n' + '\n'.join(incompatible_ops ) )
elif len(_A ) > 0:
print(f'Found the following incompatible ops for the opset {opset}:' )
print(*_A , sep='\n' )
else:
print(f'The saved model {saved_model_path} can properly be converted with ONNX.' )
if __name__ == "__main__":
lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=12, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
lowerCAmelCase : str = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
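# Example invocation, assuming this script lives at utils/check_tf_ops.py and a
# SavedModel protobuf sits at ./saved_model/saved_model.pb (both paths hypothetical):
#   python utils/check_tf_ops.py --saved_model_path ./saved_model/saved_model.pb --opset 12 --strict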
| 358 |
'''simple docstring'''
lowerCAmelCase : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCAmelCase : List[str] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 25 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : List[str] = 3_8_4
_lowerCAmelCase : Union[str, Any] = 7
if "tiny" in model_name:
_lowerCAmelCase : Optional[int] = 9_6
_lowerCAmelCase : Optional[int] = (2, 2, 6, 2)
_lowerCAmelCase : Union[str, Any] = (3, 6, 1_2, 2_4)
elif "small" in model_name:
_lowerCAmelCase : Optional[int] = 9_6
_lowerCAmelCase : int = (2, 2, 1_8, 2)
_lowerCAmelCase : List[str] = (3, 6, 1_2, 2_4)
elif "base" in model_name:
_lowerCAmelCase : List[str] = 1_2_8
_lowerCAmelCase : Dict = (2, 2, 1_8, 2)
_lowerCAmelCase : Dict = (4, 8, 1_6, 3_2)
_lowerCAmelCase : List[str] = 1_2
_lowerCAmelCase : Dict = 5_1_2
elif "large" in model_name:
_lowerCAmelCase : Union[str, Any] = 1_9_2
_lowerCAmelCase : Any = (2, 2, 1_8, 2)
_lowerCAmelCase : Dict = (6, 1_2, 2_4, 4_8)
_lowerCAmelCase : str = 1_2
_lowerCAmelCase : str = 7_6_8
# set label information
_lowerCAmelCase : List[str] = 1_5_0
_lowerCAmelCase : Union[str, Any] = 'huggingface/label-files'
_lowerCAmelCase : List[Any] = 'ade20k-id2label.json'
_lowerCAmelCase : List[str] = json.load(open(hf_hub_download(_A , _A , repo_type='dataset' ) , 'r' ) )
_lowerCAmelCase : Tuple = {int(_A ): v for k, v in idalabel.items()}
_lowerCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : str = SwinConfig(
embed_dim=_A , depths=_A , num_heads=_A , window_size=_A , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
_lowerCAmelCase : List[Any] = UperNetConfig(
backbone_config=_A , auxiliary_in_channels=_A , num_labels=_A , idalabel=_A , labelaid=_A , )
return config
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def lowercase (_A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : str = dct.pop(_A )
_lowerCAmelCase : Tuple = val
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCAmelCase : Union[str, Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCAmelCase : Dict = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' )
_lowerCAmelCase : Dict = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Tuple = in_proj_weight[:dim, :]
_lowerCAmelCase : List[Any] = in_proj_bias[: dim]
_lowerCAmelCase : Dict = in_proj_weight[
dim : dim * 2, :
]
_lowerCAmelCase : Tuple = in_proj_bias[
dim : dim * 2
]
_lowerCAmelCase : Dict = in_proj_weight[
-dim :, :
]
_lowerCAmelCase : Any = in_proj_bias[-dim :]
# fmt: on
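# A minimal illustrative sketch (not part of the original script): for a fused qkv
# matrix of shape (3 * dim, dim), the first dim rows are the query projection, the
# next dim rows the key projection, and the last dim rows the value projection;
# this matches the slicing performed above. Names and shapes here are hypothetical.
def _demo_qkv_split(dim: int = 4):
    import torch

    qkv_weight = torch.randn(3 * dim, dim)  # fused projection, rows = [q; k; v]
    query = qkv_weight[:dim, :]
    key = qkv_weight[dim : dim * 2, :]
    value = qkv_weight[-dim:, :]
    return query, key, value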
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : str = x.shape
_lowerCAmelCase : List[str] = x.reshape(_A , 4 , in_channel // 4 )
_lowerCAmelCase : Dict = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(_A , _A )
return x
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = x.shape
_lowerCAmelCase : Dict = x.reshape(_A , in_channel // 4 , 4 )
_lowerCAmelCase : Optional[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(_A , _A )
return x
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : List[str] = x.shape[0]
_lowerCAmelCase : Tuple = x.reshape(4 , in_channel // 4 )
_lowerCAmelCase : Any = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(_A )
return x
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : List[str] = x.shape[0]
_lowerCAmelCase : Tuple = x.reshape(in_channel // 4 , 4 )
_lowerCAmelCase : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(_A )
return x
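# Hedged sanity check (illustrative, not from the original): the "reverse" helpers
# above undo the [0, 2, 1, 3] unfold permutation applied to patch-merging weights,
# so composing the forward and reverse reorders returns the original tensor. The
# helper below is a self-contained sketch; names and shapes are hypothetical.
def _demo_unfold_roundtrip(out_channel: int = 4, in_channel: int = 8):
    import torch

    x = torch.randn(out_channel, in_channel)
    forward = (
        x.reshape(out_channel, 4, in_channel // 4)[:, [0, 2, 1, 3], :]
        .transpose(1, 2)
        .reshape(out_channel, in_channel)
    )
    back = (
        forward.reshape(out_channel, in_channel // 4, 4)[:, :, [0, 2, 1, 3]]
        .transpose(1, 2)
        .reshape(out_channel, in_channel)
    )
    assert torch.equal(back, x)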
def lowercase (_A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : str = {
'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
}
_lowerCAmelCase : int = model_name_to_url[model_name]
_lowerCAmelCase : Tuple = torch.hub.load_state_dict_from_url(_A , map_location='cpu' , file_name=_A )[
'state_dict'
]
for name, param in state_dict.items():
print(_A , param.shape )
_lowerCAmelCase : Optional[Any] = get_upernet_config(_A )
_lowerCAmelCase : List[str] = UperNetForSemanticSegmentation(_A )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_lowerCAmelCase : List[Any] = state_dict.pop(_A )
if "bn" in key:
_lowerCAmelCase : Optional[int] = key.replace('bn' , 'batch_norm' )
_lowerCAmelCase : Dict = val
# rename keys
_lowerCAmelCase : Optional[int] = create_rename_keys(_A )
for src, dest in rename_keys:
rename_key(_A , _A , _A )
read_in_q_k_v(_A , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
_lowerCAmelCase : List[str] = reverse_correct_unfold_reduction_order(_A )
if "norm" in key:
_lowerCAmelCase : int = reverse_correct_unfold_norm_order(_A )
model.load_state_dict(_A )
# verify on image
_lowerCAmelCase : List[Any] = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
_lowerCAmelCase : Optional[Any] = Image.open(requests.get(_A , stream=_A ).raw ).convert('RGB' )
_lowerCAmelCase : str = SegformerImageProcessor()
_lowerCAmelCase : List[Any] = processor(_A , return_tensors='pt' ).pixel_values
with torch.no_grad():
_lowerCAmelCase : List[Any] = model(_A )
_lowerCAmelCase : Tuple = outputs.logits
print(logits.shape )
print('First values of logits:' , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
_lowerCAmelCase : Optional[Any] = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
_lowerCAmelCase : Dict = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
_lowerCAmelCase : str = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_A )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(_A )
if push_to_hub:
print(f'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(f'openmmlab/{model_name}' )
processor.push_to_hub(f'openmmlab/{model_name}' )
if __name__ == "__main__":
lowerCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[F'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase : List[str] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
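# Hypothetical invocation of this conversion script (the file name and paths are
# assumptions, not taken from the original):
#   python convert_upernet_swin_to_pytorch.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny \
#       --push_to_hub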
| 359 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 25 | 0 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase : Tuple = logging.getLogger()
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = '\n'.join(_A )
Path(_A ).open('w' ).writelines(_A )
lowerCAmelCase : List[Any] = """patrickvonplaten/t5-tiny-random"""
lowerCAmelCase : List[Any] = """sshleifer/bart-tiny-random"""
lowerCAmelCase : Optional[Any] = """sshleifer/tiny-mbart"""
lowerCAmelCase : Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_lowerCAmelCase : Optional[Any] = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_lowerCAmelCase : Any = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(snake_case__ , snake_case__ )
_lowerCAmelCase : str = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
_lowerCAmelCase : Any = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_lowerCAmelCase : List[Any] = F'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(snake_case__ , 'argv' , snake_case__ ):
run_generate()
assert Path(snake_case__ ).exists()
# os.remove(Path(output_file_name))
def a ( self ):
'''simple docstring'''
self.run_eval_tester(snake_case__ )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def a ( self , snake_case__ ):
'''simple docstring'''
self.run_eval_tester(snake_case__ )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Any = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_lowerCAmelCase : Tuple = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_lowerCAmelCase : str = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
_lowerCAmelCase : Optional[Any] = Path(self.get_auto_remove_tmp_dir() )
_lowerCAmelCase : List[Any] = str(tmp_dir / 'scores.json' )
_lowerCAmelCase : List[Any] = str(tmp_dir / 'val.target' )
_dump_articles(snake_case__ , text['en'] )
_dump_articles(snake_case__ , text['de'] )
_lowerCAmelCase : Union[str, Any] = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_lowerCAmelCase : List[Any] = F'\n run_eval_search.py\n {model}\n {str(snake_case__ )}\n {str(snake_case__ )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(snake_case__ , 'argv' , snake_case__ ):
with CaptureStdout() as cs:
run_search()
_lowerCAmelCase : int = [' num_beams | length_penalty', model, 'Best score args']
_lowerCAmelCase : Any = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(snake_case__ )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(snake_case__ ).exists()
os.remove(Path(snake_case__ ) )
| 360 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "nat"
__magic_name__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=4 , snake_case__=3 , snake_case__=64 , snake_case__=[3, 4, 6, 5] , snake_case__=[2, 4, 8, 16] , snake_case__=7 , snake_case__=3.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=None , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = patch_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : Any = depths
_lowerCAmelCase : Dict = len(snake_case__ )
_lowerCAmelCase : str = num_heads
_lowerCAmelCase : Dict = kernel_size
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : int = qkv_bias
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Dict = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
_lowerCAmelCase : Any = layer_scale_init_value
_lowerCAmelCase : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )]
_lowerCAmelCase , _lowerCAmelCase : str = get_aligned_output_features_output_indices(
out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
| 25 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCAmelCase : Tuple = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "gptj"
__magic_name__ = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , snake_case__=5_0400 , snake_case__=2048 , snake_case__=4096 , snake_case__=28 , snake_case__=16 , snake_case__=64 , snake_case__=None , snake_case__="gelu_new" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1E-5 , snake_case__=0.02 , snake_case__=True , snake_case__=5_0256 , snake_case__=5_0256 , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = vocab_size
_lowerCAmelCase : Union[str, Any] = n_positions
_lowerCAmelCase : Optional[Any] = n_embd
_lowerCAmelCase : Optional[int] = n_layer
_lowerCAmelCase : Dict = n_head
_lowerCAmelCase : Optional[int] = n_inner
_lowerCAmelCase : Dict = rotary_dim
_lowerCAmelCase : List[Any] = activation_function
_lowerCAmelCase : Tuple = resid_pdrop
_lowerCAmelCase : Tuple = embd_pdrop
_lowerCAmelCase : Optional[Any] = attn_pdrop
_lowerCAmelCase : List[str] = layer_norm_epsilon
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : Optional[int] = use_cache
_lowerCAmelCase : Any = bos_token_id
_lowerCAmelCase : List[str] = eos_token_id
super().__init__(
bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ = "default" , snake_case__ = None , snake_case__ = False , ):
'''simple docstring'''
super().__init__(snake_case__ , task=snake_case__ , patching_specs=snake_case__ , use_past=snake_case__ )
if not getattr(self._config , 'pad_token_id' , snake_case__ ):
# TODO: how to do that better?
_lowerCAmelCase : Optional[Any] = 0
@property
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(snake_case__ , direction='inputs' )
_lowerCAmelCase : Tuple = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_lowerCAmelCase : int = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def a ( self ):
'''simple docstring'''
return self._config.n_layer
@property
def a ( self ):
'''simple docstring'''
return self._config.n_head
def a ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = super(snake_case__ , self ).generate_dummy_inputs(
snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
        # We need to order the inputs in the way they appear in the forward()
_lowerCAmelCase : Tuple = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowerCAmelCase : int = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowerCAmelCase : Tuple = seqlen + 2
_lowerCAmelCase : Union[str, Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_lowerCAmelCase : List[Any] = [
(torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(self.num_layers )
]
_lowerCAmelCase : Dict = common_inputs['attention_mask']
if self.use_past:
_lowerCAmelCase : Tuple = ordered_inputs['attention_mask'].dtype
_lowerCAmelCase : Union[str, Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 )
return ordered_inputs
@property
def a ( self ):
'''simple docstring'''
return 13
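# Hedged note on the dummy inputs generated above (shapes are read off the code,
# wording is illustrative): each past key/value tensor has shape
# (batch, n_head, past_sequence_length, n_embd // n_head) with
# past_sequence_length = seqlen + 2, and the attention mask is extended with that
# many ones so it covers both the cached and the newly generated tokens.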
| 361 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : str = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""roberta-base""": 5_12,
"""roberta-large""": 5_12,
"""roberta-large-mnli""": 5_12,
"""distilroberta-base""": 5_12,
"""roberta-base-openai-detector""": 5_12,
"""roberta-large-openai-detector""": 5_12,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = RobertaTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) )
_lowerCAmelCase : List[Any] = add_prefix_space
_lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = add_prefix_space
_lowerCAmelCase : Union[str, Any] = 'post_processor'
_lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
_lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_lowerCAmelCase : Any = tuple(state['sep'] )
if "cls" in state:
_lowerCAmelCase : str = tuple(state['cls'] )
_lowerCAmelCase : List[str] = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : int = add_prefix_space
_lowerCAmelCase : Tuple = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
_lowerCAmelCase : Union[str, Any] = trim_offsets
_lowerCAmelCase : Optional[int] = True
if changes_to_apply:
_lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) )
_lowerCAmelCase : Optional[int] = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
def a ( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
_lowerCAmelCase : Tuple = value
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case__ , **snake_case__ )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def a ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 25 | 0 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def a ( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
if tokenize_kwargs is None:
_lowerCAmelCase : Tuple = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
_lowerCAmelCase : Tuple = truncation
_lowerCAmelCase : int = tokenize_kwargs
_lowerCAmelCase : Any = {}
if return_tensors is not None:
_lowerCAmelCase : Any = return_tensors
return preprocess_params, {}, postprocess_params
def a ( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.framework
_lowerCAmelCase : List[str] = self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
return model_inputs
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model(**snake_case__ )
return model_outputs
def a ( self , snake_case__ , snake_case__=False ):
'''simple docstring'''
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return super().__call__(*snake_case__ , **snake_case__ )
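# A hedged usage sketch (the model id is illustrative): the pipeline above
# tokenizes the input, runs a forward pass, and returns the first model output as
# nested lists unless return_tensors=True is requested.
#
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("Hello world")  # nested list of shape (1, seq_len, hidden_size)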
| 362 |
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = 0 # The first color of the flag.
lowerCAmelCase : Optional[int] = 1 # The second color of the flag.
lowerCAmelCase : int = 2 # The third color of the flag.
lowerCAmelCase : Any = (red, white, blue)
def lowercase (_A ):
"""simple docstring"""
if not sequence:
return []
if len(_A ) == 1:
return list(_A )
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : List[str] = len(_A ) - 1
_lowerCAmelCase : Optional[Any] = 0
while mid <= high:
if sequence[mid] == colors[0]:
_lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[high], sequence[mid]
high -= 1
else:
_lowerCAmelCase : Optional[int] = f'The elements inside the sequence must contains only {colors} values'
raise ValueError(_A )
return sequence
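# Hedged note with an illustrative run: the single pass above maintains three
# regions (reds before `low`, whites between `low` and `mid`, blues after `high`),
# giving O(n) time and O(1) extra space. Matching the __main__ call below:
#   dutch_national_flag_sort([2, 0, 1, 2, 0])  ->  [0, 0, 1, 2, 2]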
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : str = input("""Enter numbers separated by commas:\n""").strip()
lowerCAmelCase : Dict = [int(item.strip()) for item in user_input.split(""",""")]
print(F'''{dutch_national_flag_sort(unsorted)}''')
| 25 | 0 |
'''simple docstring'''
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
_lowerCAmelCase : List[str] = ''
_lowerCAmelCase : Any = ''
    # append each character + "|" to new_input_string for range(0, length-1)
for i in input_string[: len(_A ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the previously found
    # furthest-ending palindromic substring
    _lowerCAmelCase , _lowerCAmelCase : Optional[int] = 0, 0
# length[i] shows the length of palindromic substring with center i
_lowerCAmelCase : List[str] = [1 for i in range(len(_A ) )]
# for each character in new_string find corresponding palindromic string
_lowerCAmelCase : Any = 0
for j in range(len(_A ) ):
_lowerCAmelCase : Optional[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(_A )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_lowerCAmelCase : List[str] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update r to the last index of this palindrome
if j + k - 1 > r:
_lowerCAmelCase : Optional[Any] = j - k + 1 # noqa: E741
_lowerCAmelCase : int = j + k - 1
# update max_length and start position
if max_length < length[j]:
_lowerCAmelCase : Dict = length[j]
_lowerCAmelCase : Optional[int] = j
# create that string
_lowerCAmelCase : List[str] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
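# Illustrative example (hedged, not from the original docstring): for the input
# "forgeeksskeegfor" the padded string is "f|o|r|g|e|e|k|s|s|k|e|e|g|f|o|r" and the
# function returns "geeksskeeg", the longest palindromic substring, computed in
# O(n) time (Manacher's algorithm).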
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
'''simple docstring'''
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Optional[int] = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
_lowerCAmelCase : int = 6
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : Optional[int] = 1_9_0_1
_lowerCAmelCase : Optional[Any] = 0
while year < 2_0_0_1:
day += 7
if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
_lowerCAmelCase : List[str] = day - days_per_month[month - 2]
elif day > 2_9 and month == 2:
month += 1
_lowerCAmelCase : List[str] = day - 2_9
else:
if day > days_per_month[month - 1]:
month += 1
_lowerCAmelCase : List[str] = day - days_per_month[month - 2]
if month > 1_2:
year += 1
_lowerCAmelCase : Optional[int] = 1
if year < 2_0_0_1 and day == 1:
sundays += 1
return sundays
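# Hedged cross-check (not part of the original solution): the same count can be
# reproduced with the standard library; both approaches yield 171 Sundays on the
# first of the month between 1 Jan 1901 and 31 Dec 2000.
def _verify_with_datetime() -> int:
    import datetime

    return sum(
        1
        for year in range(1_9_0_1, 2_0_0_1)
        for month in range(1, 1_3)
        if datetime.date(year, month, 1).weekday() == 6  # Sunday
    )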
if __name__ == "__main__":
print(solution())
| 25 | 0 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=0.0 , snake_case__ = None , snake_case__ = "geglu" , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = "layer_norm" , snake_case__ = False , ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = only_cross_attention
_lowerCAmelCase : Tuple = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
_lowerCAmelCase : List[str] = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'
F' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
_lowerCAmelCase : List[Any] = AdaLayerNorm(snake_case__ , snake_case__ )
elif self.use_ada_layer_norm_zero:
_lowerCAmelCase : List[Any] = AdaLayerNormZero(snake_case__ , snake_case__ )
else:
_lowerCAmelCase : Optional[int] = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
_lowerCAmelCase : Optional[Any] = Attention(
query_dim=snake_case__ , heads=snake_case__ , dim_head=snake_case__ , dropout=snake_case__ , bias=snake_case__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=snake_case__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self-attention, where there is only one
            # attention block; the modulation chunks it returns would not make sense if produced
            # for the second (cross-)attention block.
_lowerCAmelCase : Optional[Any] = (
AdaLayerNorm(snake_case__ , snake_case__ )
if self.use_ada_layer_norm
else nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
)
_lowerCAmelCase : List[Any] = Attention(
query_dim=snake_case__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=snake_case__ , dim_head=snake_case__ , dropout=snake_case__ , bias=snake_case__ , upcast_attention=snake_case__ , ) # is self-attn if encoder_hidden_states is none
else:
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : int = None
# 3. Feed-forward
_lowerCAmelCase : Optional[int] = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
_lowerCAmelCase : Union[str, Any] = FeedForward(snake_case__ , dropout=snake_case__ , activation_fn=snake_case__ , final_dropout=snake_case__ )
# let chunk size default to None
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Union[str, Any] = 0
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = chunk_size
_lowerCAmelCase : Tuple = dim
def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , ):
'''simple docstring'''
if self.use_ada_layer_norm:
_lowerCAmelCase : Union[str, Any] = self.norma(snake_case__ , snake_case__ )
elif self.use_ada_layer_norm_zero:
_lowerCAmelCase : str = self.norma(
snake_case__ , snake_case__ , snake_case__ , hidden_dtype=hidden_states.dtype )
else:
_lowerCAmelCase : Union[str, Any] = self.norma(snake_case__ )
_lowerCAmelCase : str = cross_attention_kwargs if cross_attention_kwargs is not None else {}
_lowerCAmelCase : Union[str, Any] = self.attna(
snake_case__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=snake_case__ , **snake_case__ , )
if self.use_ada_layer_norm_zero:
_lowerCAmelCase : int = gate_msa.unsqueeze(1 ) * attn_output
_lowerCAmelCase : str = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
_lowerCAmelCase : List[str] = (
self.norma(snake_case__ , snake_case__ ) if self.use_ada_layer_norm else self.norma(snake_case__ )
)
_lowerCAmelCase : Optional[int] = self.attna(
snake_case__ , encoder_hidden_states=snake_case__ , attention_mask=snake_case__ , **snake_case__ , )
_lowerCAmelCase : Optional[int] = attn_output + hidden_states
# 3. Feed-forward
_lowerCAmelCase : Optional[Any] = self.norma(snake_case__ )
if self.use_ada_layer_norm_zero:
_lowerCAmelCase : Any = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.' )
_lowerCAmelCase : Optional[int] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
_lowerCAmelCase : str = torch.cat(
[self.ff(snake_case__ ) for hid_slice in norm_hidden_states.chunk(snake_case__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
_lowerCAmelCase : Tuple = self.ff(snake_case__ )
if self.use_ada_layer_norm_zero:
_lowerCAmelCase : int = gate_mlp.unsqueeze(1 ) * ff_output
_lowerCAmelCase : Dict = ff_output + hidden_states
return hidden_states
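# Hedged note on the chunking above: splitting the normalized hidden states along
# `_chunk_dim` trades peak activation memory for extra feed-forward calls. A
# stand-alone equivalent (names hypothetical):
#   chunks = norm_hidden_states.chunk(num_chunks, dim=chunk_dim)
#   ff_output = torch.cat([ff(chunk) for chunk in chunks], dim=chunk_dim)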
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ = None , snake_case__ = 4 , snake_case__ = 0.0 , snake_case__ = "geglu" , snake_case__ = False , ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = int(dim * mult )
_lowerCAmelCase : Optional[Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
_lowerCAmelCase : Tuple = GELU(snake_case__ , snake_case__ )
if activation_fn == "gelu-approximate":
_lowerCAmelCase : Any = GELU(snake_case__ , snake_case__ , approximate='tanh' )
elif activation_fn == "geglu":
_lowerCAmelCase : Tuple = GEGLU(snake_case__ , snake_case__ )
elif activation_fn == "geglu-approximate":
_lowerCAmelCase : Any = ApproximateGELU(snake_case__ , snake_case__ )
_lowerCAmelCase : Union[str, Any] = nn.ModuleList([] )
# project in
self.net.append(snake_case__ )
# project dropout
self.net.append(nn.Dropout(snake_case__ ) )
# project out
self.net.append(nn.Linear(snake_case__ , snake_case__ ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. has a final dropout
if final_dropout:
self.net.append(nn.Dropout(snake_case__ ) )
def a ( self , snake_case__ ):
'''simple docstring'''
for module in self.net:
_lowerCAmelCase : List[Any] = module(snake_case__ )
return hidden_states
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ = "none" ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : str = nn.Linear(snake_case__ , snake_case__ )
_lowerCAmelCase : List[Any] = approximate
def a ( self , snake_case__ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(snake_case__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.proj(snake_case__ )
_lowerCAmelCase : str = self.gelu(snake_case__ )
return hidden_states
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Tuple = nn.Linear(snake_case__ , dim_out * 2 )
def a ( self , snake_case__ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(snake_case__ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.proj(snake_case__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(snake_case__ )
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(snake_case__ , snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.proj(snake_case__ )
return x * torch.sigmoid(1.702 * x )
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.Embedding(snake_case__ , snake_case__ )
_lowerCAmelCase : List[Any] = nn.SiLU()
_lowerCAmelCase : Optional[int] = nn.Linear(snake_case__ , embedding_dim * 2 )
_lowerCAmelCase : Optional[int] = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.linear(self.silu(self.emb(snake_case__ ) ) )
_lowerCAmelCase : List[Any] = torch.chunk(snake_case__ , 2 )
_lowerCAmelCase : Optional[Any] = self.norm(snake_case__ ) * (1 + scale) + shift
return x
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = CombinedTimestepLabelEmbeddings(snake_case__ , snake_case__ )
_lowerCAmelCase : Dict = nn.SiLU()
_lowerCAmelCase : Tuple = nn.Linear(snake_case__ , 6 * embedding_dim , bias=snake_case__ )
_lowerCAmelCase : Optional[Any] = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ , eps=1E-6 )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.linear(self.silu(self.emb(snake_case__ , snake_case__ , hidden_dtype=snake_case__ ) ) )
_lowerCAmelCase : Any = emb.chunk(6 , dim=1 )
_lowerCAmelCase : Any = self.norm(snake_case__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = 1E-5 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Dict = num_groups
_lowerCAmelCase : Dict = eps
if act_fn is None:
_lowerCAmelCase : Union[str, Any] = None
else:
_lowerCAmelCase : List[Any] = get_activation(snake_case__ )
_lowerCAmelCase : int = nn.Linear(snake_case__ , out_dim * 2 )
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
if self.act:
_lowerCAmelCase : List[Any] = self.act(snake_case__ )
_lowerCAmelCase : Optional[int] = self.linear(snake_case__ )
_lowerCAmelCase : str = emb[:, :, None, None]
_lowerCAmelCase : Any = emb.chunk(2 , dim=1 )
_lowerCAmelCase : int = F.group_norm(snake_case__ , self.num_groups , eps=self.eps )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
| 364 |
'''simple docstring'''
def lowercase (_A = 1_0_0_0_0_0_0 ):
"""simple docstring"""
_lowerCAmelCase : Any = set(range(3 , _A , 2 ) )
primes.add(2 )
for p in range(3 , _A , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , _A , _A ) ) )
    _lowerCAmelCase : Union[str, Any] = [float(n ) for n in range(limit + 1 )]
for p in primes:
for n in range(_A , limit + 1 , _A ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
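# Hedged illustration: after the sieve, phi[n] holds Euler's totient of n, so the
# sum counts the reduced proper fractions with denominator <= limit (Project Euler
# 72). For example, limit = 8 gives 21, since phi(2..8) = 1 + 2 + 2 + 4 + 2 + 6 + 4.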
if __name__ == "__main__":
print(F'''{solution() = }''')
| 25 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def lowercase (_A ):
"""simple docstring"""
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : List[str] = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
_lowerCAmelCase : int = key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' )
_lowerCAmelCase : str = key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' )
_lowerCAmelCase : Optional[Any] = key.replace('heads.cmd.itm_head.cls' , 'itm_head' )
_lowerCAmelCase : str = key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' )
_lowerCAmelCase : List[str] = key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' )
_lowerCAmelCase : int = key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' )
_lowerCAmelCase : str = key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' )
_lowerCAmelCase : Dict = key.replace('mm_text_projection' , 'flava.text_to_mm_projection' )
_lowerCAmelCase : int = key.replace('mm_image_projection' , 'flava.image_to_mm_projection' )
_lowerCAmelCase : Optional[Any] = key.replace('image_encoder.module' , 'flava.image_model' )
_lowerCAmelCase : Tuple = key.replace('text_encoder.module' , 'flava.text_model' )
_lowerCAmelCase : int = key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' )
_lowerCAmelCase : int = key.replace('mm_encoder.module' , 'flava.multimodal_model' )
_lowerCAmelCase : Optional[int] = key.replace('text_projection' , 'flava.text_projection' )
_lowerCAmelCase : Dict = key.replace('image_projection' , 'flava.image_projection' )
_lowerCAmelCase : Tuple = value.float()
for key, value in codebook_state_dict.items():
_lowerCAmelCase : str = value
return upgrade
@torch.no_grad()
def lowercase (_A , _A , _A , _A=None ):
"""simple docstring"""
if config_path is not None:
_lowerCAmelCase : Optional[Any] = FlavaConfig.from_pretrained(_A )
else:
_lowerCAmelCase : Optional[int] = FlavaConfig()
_lowerCAmelCase : Optional[int] = FlavaForPreTraining(_A ).eval()
_lowerCAmelCase : str = convert_dalle_checkpoint(_A , _A , save_checkpoint=_A )
if os.path.exists(_A ):
_lowerCAmelCase : Optional[Any] = torch.load(_A , map_location='cpu' )
else:
_lowerCAmelCase : Tuple = torch.hub.load_state_dict_from_url(_A , map_location='cpu' )
_lowerCAmelCase : List[Any] = upgrade_state_dict(_A , _A )
hf_model.load_state_dict(_A )
_lowerCAmelCase : str = hf_model.state_dict()
_lowerCAmelCase : str = count_parameters(_A )
_lowerCAmelCase : Any = count_parameters(_A ) + count_parameters(_A )
assert torch.allclose(_A , _A , atol=1E-3 )
hf_model.save_pretrained(_A )
if __name__ == "__main__":
lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCAmelCase : Any = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
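# Hypothetical invocation (the script name and paths are placeholders, not taken
# from the original):
#   python convert_flava_original_pytorch_to_hf.py \
#       --checkpoint_path flava.pt --codebook_path codebook.pt \
#       --pytorch_dump_folder_path ./flava --config_path config.json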
| 365 |
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase : Tuple = """src/transformers"""
# Pattern that looks at the indentation in a line.
lowerCAmelCase : str = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase : str = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""")
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : int = _re_indent.search(_A )
return "" if search is None else search.groups()[0]
def lowercase (_A , _A="" , _A=None , _A=None ):
"""simple docstring"""
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_A ):
index += 1
_lowerCAmelCase : Dict = ['\n'.join(lines[:index] )]
else:
_lowerCAmelCase : str = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : List[Any] = [lines[index]]
index += 1
while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_A ) )
if index < len(_A ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Union[str, Any] = []
else:
blocks.append('\n'.join(_A ) )
_lowerCAmelCase : List[str] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_A ) > 0:
blocks.append('\n'.join(_A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_A ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def lowercase (_A ):
"""simple docstring"""
def _inner(_A ):
return key(_A ).lower().replace('_' , '' )
return _inner
def lowercase (_A , _A=None ):
"""simple docstring"""
def noop(_A ):
return x
if key is None:
_lowerCAmelCase : List[Any] = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : List[Any] = [obj for obj in objects if key(_A ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : Tuple = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : List[str] = [obj for obj in objects if not key(_A )[0].isupper()]
_lowerCAmelCase : Dict = ignore_underscore(_A )
return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A )
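# Hedged example of the resulting order (constants first, then classes, then
# functions, with leading underscores ignored when sorting):
#   sort_objects(["foo", "Bar", "BAZ", "_qux"])  ->  ["BAZ", "Bar", "foo", "_qux"]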
def lowercase (_A ):
"""simple docstring"""
def _replace(_A ):
_lowerCAmelCase : Dict = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_lowerCAmelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : int = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]"
_lowerCAmelCase : Tuple = import_statement.split('\n' )
if len(_A ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1
_lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowerCAmelCase : Dict = sort_objects(_A , key=lambda _A : x[1] )
_lowerCAmelCase : Tuple = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_A ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : Tuple = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : List[str] = keys[:-1]
_lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(_A )] )
return "\n".join(_A )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A )
return import_statement
def lowercase (_A , _A=True ):
"""simple docstring"""
with open(_A , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase : Tuple = split_code_in_indented_blocks(
_A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_A ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCAmelCase : Tuple = main_blocks[block_idx]
_lowerCAmelCase : int = block.split('\n' )
# Get to the start of the imports.
_lowerCAmelCase : Tuple = 0
while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase : Dict = len(_A )
else:
line_idx += 1
if line_idx >= len(_A ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] )
_lowerCAmelCase : Tuple = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
_lowerCAmelCase : List[Any] = split_code_in_indented_blocks(_A , indent_level=_A )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase : Optional[int] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None]
_lowerCAmelCase : Optional[int] = [x[0] for x in sorted(_A , key=lambda _A : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCAmelCase : int = 0
_lowerCAmelCase : Optional[Any] = []
for i in range(len(_A ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(_A )
count += 1
# And we put our main block back together with its first and last line.
_lowerCAmelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(_A ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(_A , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(_A ) )
def lowercase (_A=True ):
"""simple docstring"""
_lowerCAmelCase : int = []
for root, _, files in os.walk(_A ):
if "__init__.py" in files:
_lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A )
if result:
_lowerCAmelCase : Optional[int] = [os.path.join(_A , '__init__.py' )]
if len(_A ) > 0:
raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' )
if __name__ == "__main__":
lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowerCAmelCase : List[str] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 25 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowerCAmelCase : str = logging.get_logger(__name__)
lowerCAmelCase : str = {"""vocab_file""": """vocab.txt"""}
lowerCAmelCase : Optional[int] = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
lowerCAmelCase : List[Any] = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
lowerCAmelCase : List[Any] = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_INIT_CONFIGURATION
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ConvBertTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=True , snake_case__="[UNK]" , snake_case__="[SEP]" , snake_case__="[PAD]" , snake_case__="[CLS]" , snake_case__="[MASK]" , snake_case__=True , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , **snake_case__ , )
_lowerCAmelCase : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
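        # Rebuild the backend normalizer only if the requested options differ from the serialized state.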
if (
normalizer_state.get('lowercase' , snake_case__ ) != do_lower_case
or normalizer_state.get('strip_accents' , snake_case__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , snake_case__ ) != tokenize_chinese_chars
):
_lowerCAmelCase : str = getattr(snake_case__ , normalizer_state.pop('type' ) )
_lowerCAmelCase : List[str] = do_lower_case
_lowerCAmelCase : Any = strip_accents
_lowerCAmelCase : Tuple = tokenize_chinese_chars
_lowerCAmelCase : str = normalizer_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = do_lower_case
def a ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
_lowerCAmelCase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [self.sep_token_id]
_lowerCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 366 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = KandinskyVaaInpaintPipeline
__magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
__magic_name__ = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
__magic_name__ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__magic_name__ = False
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a ( self ):
'''simple docstring'''
return 100
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = {
'in_channels': 9,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ )
return model
@property
def a ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.dummy_unet
_lowerCAmelCase : List[Any] = self.dummy_movq
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , )
_lowerCAmelCase : Any = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a ( self , snake_case__ , snake_case__=0 ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
_lowerCAmelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create init_image
_lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
_lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((256, 256) )
# create mask
_lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.floataa )
_lowerCAmelCase : Dict = 0
if str(snake_case__ ).startswith('mps' ):
_lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ )
else:
_lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_lowerCAmelCase : Optional[int] = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = 'cpu'
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) )
_lowerCAmelCase : int = output.images
_lowerCAmelCase : int = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : List[str] = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def a ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
_lowerCAmelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa )
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[str] = 'a hat'
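        # Kandinsky 2.2 runs in two stages: a prior maps the prompt to image embeddings, then the decoder inpaints.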
_lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
_lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowerCAmelCase : Optional[Any] = pipeline(
image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
_lowerCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 25 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "nat"
__magic_name__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=4 , snake_case__=3 , snake_case__=64 , snake_case__=[3, 4, 6, 5] , snake_case__=[2, 4, 8, 16] , snake_case__=7 , snake_case__=3.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=None , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = patch_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : Any = depths
_lowerCAmelCase : Dict = len(snake_case__ )
_lowerCAmelCase : str = num_heads
_lowerCAmelCase : Dict = kernel_size
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : int = qkv_bias
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Dict = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
_lowerCAmelCase : Any = layer_scale_init_value
_lowerCAmelCase : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )]
_lowerCAmelCase : str = get_aligned_output_features_output_indices(
out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
| 367 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def lowercase (_A ):
"""simple docstring"""
if not postfix_notation:
return 0
_lowerCAmelCase : int = {'+', '-', '*', '/'}
_lowerCAmelCase : list[Any] = []
for token in postfix_notation:
if token in operations:
_lowerCAmelCase , _lowerCAmelCase : Tuple = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
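                # Python's // floors toward negative infinity; add 1 to truncate toward zero for mixed signs.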
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(_A ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 0 |
'''simple docstring'''
lowerCAmelCase : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCAmelCase : List[str] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 368 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "mobilenet_v2"
def __init__( self , snake_case__=3 , snake_case__=224 , snake_case__=1.0 , snake_case__=8 , snake_case__=8 , snake_case__=6 , snake_case__=32 , snake_case__=True , snake_case__=True , snake_case__="relu6" , snake_case__=True , snake_case__=0.8 , snake_case__=0.02 , snake_case__=0.001 , snake_case__=255 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Union[str, Any] = image_size
_lowerCAmelCase : List[Any] = depth_multiplier
_lowerCAmelCase : List[Any] = depth_divisible_by
_lowerCAmelCase : Optional[Any] = min_depth
_lowerCAmelCase : str = expand_ratio
_lowerCAmelCase : str = output_stride
_lowerCAmelCase : Any = first_layer_is_expansion
_lowerCAmelCase : int = finegrained_output
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : List[str] = tf_padding
_lowerCAmelCase : Optional[int] = classifier_dropout_prob
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : str = semantic_loss_ignore_index
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = version.parse("1.11" )
@property
def a ( self ):
'''simple docstring'''
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def a ( self ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def a ( self ):
'''simple docstring'''
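        # Absolute tolerance used when validating the exported ONNX model against the PyTorch reference.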
return 1E-4
| 25 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : List[str] = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 369 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase : Optional[int] = 'pt'
_lowerCAmelCase : Tuple = 'tf'
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case__ )
model_tf.save_pretrained(snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = 'mock_framework'
# Framework provided - return whatever the user provides
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(snake_case__ )
_lowerCAmelCase : Dict = FeaturesManager.determine_framework(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(snake_case__ )
_lowerCAmelCase : int = FeaturesManager.determine_framework(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
def a ( self ):
'''simple docstring'''
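        # PyTorch checkpoint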
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(snake_case__ )
_lowerCAmelCase : Tuple = FeaturesManager.determine_framework(snake_case__ )
self.assertEqual(snake_case__ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(snake_case__ )
_lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(snake_case__ )
self.assertEqual(snake_case__ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(snake_case__ ):
_lowerCAmelCase : str = FeaturesManager.determine_framework(snake_case__ )
def a ( self ):
'''simple docstring'''
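        # TensorFlow not in environment -> use PyTorch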
_lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_tf_available' , snake_case__ ):
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case__ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase : Any = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_torch_available' , snake_case__ ):
_lowerCAmelCase : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case__ , self.framework_tf )
# Both in environment -> use PyTorch
_lowerCAmelCase : int = MagicMock(return_value=snake_case__ )
_lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch(
'transformers.onnx.features.is_torch_available' , snake_case__ ):
_lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case__ , self.framework_pt )
# Both not in environment -> raise error
_lowerCAmelCase : str = MagicMock(return_value=snake_case__ )
_lowerCAmelCase : Optional[Any] = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch(
'transformers.onnx.features.is_torch_available' , snake_case__ ):
with self.assertRaises(snake_case__ ):
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
| 25 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
_lowerCAmelCase : Optional[Any] = BlipImageProcessor()
_lowerCAmelCase : Optional[Any] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel' )
_lowerCAmelCase : str = BlipProcessor(snake_case__ , snake_case__ )
processor.save_pretrained(self.tmpdirname )
def a ( self , **snake_case__ ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).tokenizer
def a ( self , **snake_case__ ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).image_processor
def a ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_lowerCAmelCase : Union[str, Any] = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
_lowerCAmelCase : Optional[Any] = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Any = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
_lowerCAmelCase : Dict = self.prepare_image_inputs()
_lowerCAmelCase : Tuple = image_processor(snake_case__ , return_tensors='np' )
_lowerCAmelCase : Union[str, Any] = processor(images=snake_case__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
_lowerCAmelCase : Any = 'lower newer'
_lowerCAmelCase : str = processor(text=snake_case__ )
_lowerCAmelCase : Tuple = tokenizer(snake_case__ , return_token_type_ids=snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_image_processor()
_lowerCAmelCase : Optional[Any] = self.get_tokenizer()
_lowerCAmelCase : Union[str, Any] = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
_lowerCAmelCase : Optional[Any] = 'lower newer'
_lowerCAmelCase : Dict = self.prepare_image_inputs()
_lowerCAmelCase : Dict = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : str = self.get_tokenizer()
_lowerCAmelCase : int = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
_lowerCAmelCase : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : Tuple = processor.batch_decode(snake_case__ )
_lowerCAmelCase : List[str] = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Any = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
_lowerCAmelCase : Any = 'lower newer'
_lowerCAmelCase : str = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(text=snake_case__ , images=snake_case__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
| 370 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Any = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = NllbTokenizer
__magic_name__ = []
__magic_name__ = []
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
_lowerCAmelCase : Dict = legacy_behaviour
super().__init__(
vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : int = False if not self.vocab_file else True
_lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_lowerCAmelCase : Any = {
lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn'
_lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang )
_lowerCAmelCase : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowerCAmelCase : Optional[Any] = src_lang
_lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
_lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ )
_lowerCAmelCase : Optional[Any] = tgt_lang_id
return inputs
def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = src_lang
_lowerCAmelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ )
def a ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def a ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ )
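        # Legacy mode: no prefix, suffix = [eos, lang_code]. Default mode: prefix = [lang_code], suffix = [eos].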
if self.legacy_behaviour:
_lowerCAmelCase : Dict = []
_lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : int = [self.eos_token_id]
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : int = []
_lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : List[str] = [self.eos_token_id]
_lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_lowerCAmelCase : Union[str, Any] = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
| 25 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : dict[int, int] = {}
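    # Incremental sieve of Eratosthenes: map each upcoming composite to one of its prime factors.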
_lowerCAmelCase : str = 2
while True:
_lowerCAmelCase : Optional[Any] = factor_map.pop(_A , _A )
if factor:
_lowerCAmelCase : Optional[int] = factor + prime
while x in factor_map:
x += factor
_lowerCAmelCase : Dict = factor
else:
_lowerCAmelCase : Optional[Any] = prime
yield prime
prime += 1
def lowercase (_A = 1E10 ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = sieve()
_lowerCAmelCase : Union[str, Any] = 1
while True:
_lowerCAmelCase : List[Any] = next(_A )
if (2 * prime * n) > limit:
return n
        # Ignore the next prime as the remainder will be 2.
next(_A )
n += 2
if __name__ == "__main__":
print(solution())
| 371 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCAmelCase : List[str] = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def lowercase (_A ):
"""simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
lowerCAmelCase : Dict = parser.parse_args()
if args.check_lib:
lowerCAmelCase : Union[str, Any] = importlib.import_module("""transformers""")
lowerCAmelCase : int = Path(transformers_module.__file__).parent
else:
lowerCAmelCase : int = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
| 25 | 0 |
'''simple docstring'''
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Optional[int] = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
_lowerCAmelCase : int = 6
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : Optional[int] = 1_9_0_1
_lowerCAmelCase : Optional[Any] = 0
while year < 2_0_0_1:
day += 7
if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
_lowerCAmelCase : List[str] = day - days_per_month[month - 2]
elif day > 2_9 and month == 2:
month += 1
_lowerCAmelCase : List[str] = day - 2_9
else:
if day > days_per_month[month - 1]:
month += 1
_lowerCAmelCase : List[str] = day - days_per_month[month - 2]
if month > 1_2:
year += 1
_lowerCAmelCase : Optional[int] = 1
if year < 2_0_0_1 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
| 350 |
'''simple docstring'''
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = 0
# if input_string is "aba" than new_input_string become "a|b|a"
_lowerCAmelCase : List[str] = ''
_lowerCAmelCase : Any = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(_A ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the previous furthest-ending palindromic
    # substring
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = 0, 0
# length[i] shows the length of palindromic substring with center i
_lowerCAmelCase : List[str] = [1 for i in range(len(_A ) )]
    # for each character in new_input_string find the corresponding palindromic string
_lowerCAmelCase : Any = 0
for j in range(len(_A ) ):
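        # Manacher's trick: seed k from the mirror position (l + r - j) when j lies inside the known palindrome [l, r].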
_lowerCAmelCase : Optional[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(_A )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_lowerCAmelCase : List[str] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update l and r to the bounds of this palindrome
if j + k - 1 > r:
_lowerCAmelCase : Optional[Any] = j - k + 1 # noqa: E741
_lowerCAmelCase : int = j + k - 1
# update max_length and start position
if max_length < length[j]:
_lowerCAmelCase : Dict = length[j]
_lowerCAmelCase : Optional[int] = j
# create that string
_lowerCAmelCase : List[str] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = tempfile.mkdtemp()
_lowerCAmelCase : int = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
_lowerCAmelCase : int = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
_lowerCAmelCase : Dict = os.path.join(self.tmpdirname , snake_case__ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(snake_case__ , snake_case__ )
def a ( self , **snake_case__ ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def a ( self , **snake_case__ ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case__ )
def a ( self , **snake_case__ ):
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ )
def a ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_lowerCAmelCase : Any = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
_lowerCAmelCase : int = self.get_rust_tokenizer()
_lowerCAmelCase : int = self.get_image_processor()
_lowerCAmelCase : List[Any] = AlignProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
processor_slow.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Optional[int] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case__ )
_lowerCAmelCase : Optional[Any] = AlignProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
processor_fast.save_pretrained(self.tmpdirname )
_lowerCAmelCase : int = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , snake_case__ )
self.assertIsInstance(processor_fast.tokenizer , snake_case__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , snake_case__ )
self.assertIsInstance(processor_fast.image_processor , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
_lowerCAmelCase : Any = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.get_image_processor()
_lowerCAmelCase : Optional[Any] = self.get_tokenizer()
_lowerCAmelCase : List[str] = AlignProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
_lowerCAmelCase : List[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Tuple = image_processor(snake_case__ , return_tensors='np' )
_lowerCAmelCase : Optional[Any] = processor(images=snake_case__ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_image_processor()
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = AlignProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
_lowerCAmelCase : Optional[int] = 'lower newer'
_lowerCAmelCase : Any = processor(text=snake_case__ )
_lowerCAmelCase : List[str] = tokenizer(snake_case__ , padding='max_length' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Tuple = AlignProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
_lowerCAmelCase : Tuple = 'lower newer'
_lowerCAmelCase : int = self.prepare_image_inputs()
_lowerCAmelCase : Dict = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Tuple = AlignProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
_lowerCAmelCase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : Dict = processor.batch_decode(snake_case__ )
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Tuple = AlignProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
_lowerCAmelCase : Tuple = 'lower newer'
_lowerCAmelCase : int = self.prepare_image_inputs()
_lowerCAmelCase : str = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 351 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = 0
__magic_name__ = False
__magic_name__ = 3.0
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_lowerCAmelCase : str = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , snake_case__ )
@require_multi_gpu
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case__ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler])
lowerCAmelCase : Optional[Any] = torch.nn.Linear(1_00, 2_00)
lowerCAmelCase : List[str] = accelerator.prepare(model)
# Check the values changed in kwargs
lowerCAmelCase : List[Any] = """"""
lowerCAmelCase : Tuple = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 25 | 0 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 352 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "trajectory_transformer"
__magic_name__ = ["past_key_values"]
__magic_name__ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , snake_case__=100 , snake_case__=5 , snake_case__=1 , snake_case__=1 , snake_case__=249 , snake_case__=6 , snake_case__=17 , snake_case__=25 , snake_case__=4 , snake_case__=4 , snake_case__=128 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0006 , snake_case__=512 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=True , snake_case__=1 , snake_case__=5_0256 , snake_case__=5_0256 , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Any = action_weight
_lowerCAmelCase : Optional[int] = reward_weight
_lowerCAmelCase : Union[str, Any] = value_weight
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Tuple = block_size
_lowerCAmelCase : List[Any] = action_dim
_lowerCAmelCase : List[Any] = observation_dim
_lowerCAmelCase : Union[str, Any] = transition_dim
_lowerCAmelCase : Tuple = learning_rate
_lowerCAmelCase : int = n_layer
_lowerCAmelCase : Any = n_head
_lowerCAmelCase : Tuple = n_embd
_lowerCAmelCase : Optional[Any] = embd_pdrop
_lowerCAmelCase : Union[str, Any] = attn_pdrop
_lowerCAmelCase : Any = resid_pdrop
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : List[Any] = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = kaiming_initializer_range
_lowerCAmelCase : List[Any] = use_cache
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
| 25 | 0 |
'''simple docstring'''
def lowercase (_A , _A , _A ):
"""simple docstring"""
def update_area_of_max_square(_A , _A ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
_lowerCAmelCase : Any = update_area_of_max_square(_A , col + 1 )
_lowerCAmelCase : Any = update_area_of_max_square(row + 1 , col + 1 )
_lowerCAmelCase : List[Any] = update_area_of_max_square(row + 1 , _A )
if mat[row][col]:
_lowerCAmelCase : Dict = 1 + min([right, diagonal, down] )
_lowerCAmelCase : Tuple = max(largest_square_area[0] , _A )
return sub_problem_sol
else:
return 0
_lowerCAmelCase : int = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion as above, memoized in dp_array so each cell is solved at most once."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP over an auxiliary (rows + 1) x (cols + 1) table, filled bottom-right to top-left."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area
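
# Only the row below (dp_array[row + 1]) is read while dp_array[row] is being
# filled, so the full table can be replaced by two rolling rows, cutting the
# auxiliary space from O(rows * cols) to O(cols). The variant below does that.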
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only two rows of the table instead of the whole grid."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        # Copy, not alias: the row above must read this row's finished values
        # while current_row is overwritten on the next pass.
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
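
    # Editorial cross-check (not in the original): the four implementations above
    # should agree; the largest all-ones square in this sample has side 2.
    sample = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    for func in (
        largest_square_area_in_matrix_top_down_approach,
        largest_square_area_in_matrix_top_down_approach_with_dp,
        largest_square_area_in_matrix_bottom_up,
        largest_square_area_in_matrix_bottom_up_space_optimization,
    ):
        assert func(3, 3, sample) == 2, func.__name__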
| 353 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)

    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
    def test_tokenizer_integration(self):
        # fmt: off
_lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        expected_encoding = _lowerCAmelCase  # the pre-formatted encoding dict defined above
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/mbart-large-50",
            revision="d3913889c59cd5c9e456b269c376325eabad57e2",
        )
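        # (tokenizer_integration_test_util comes from the shared tokenizer test
        # mixin: it re-encodes a fixed set of sample texts with the pinned model
        # revision above and compares the result to expected_encoding.)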
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
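
# Editorial sketch (not part of the test file): the end-to-end usage these tests
# exercise would look roughly like the following; the model class and generation
# call are assumptions based on the public MBart-50 examples, not asserted above.
#
#     from transformers import MBart50Tokenizer, MBartForConditionalGeneration
#
#     tokenizer = MBart50Tokenizer.from_pretrained(
#         "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX"
#     )
#     model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-one-to-many-mmt")
#     encoded = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#     generated = model.generate(**encoded, forced_bos_token_id=tokenizer.lang_code_to_id["ro_RO"])
#     print(tokenizer.batch_decode(generated, skip_special_tokens=True))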
| 25 | 0 |
'''simple docstring'''
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial division up to the integer square root (number is assumed to be >= 2)."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """
    Count the primes below max_prime that are a difference of two consecutive
    cubes, i.e. of the form (n + 1)**3 - n**3 = 3*n*n + 3*n + 1.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2**3 - 1**3, the first such difference
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        # Consecutive-cube differences grow by 6 * cube_index at each step.
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
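
    # Editorial sanity check: the candidate generator should enumerate the
    # consecutive-cube differences (n + 1)**3 - n**3 for n = 1, 2, 3, ...
    assert [(n + 1) ** 3 - n**3 for n in range(1, 5)] == [7, 19, 37, 61]
    assert solution(62) == 4  # 7, 19, 37 and 61 are all prime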
| 354 |
| 25 | 0 |