Dataset schema:
  code                     string  (length 86 – 54.5k)
  code_codestyle           int64   (0 – 371)
  style_context            string  (length 87 – 49.2k)
  style_context_codestyle  int64   (0 – 349)
  label                    int64   (0 – 1)
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __magic_name__ ( unittest.TestCase): def __init__( self : List[str] , lowercase_ : Optional[int] , lowercase_ : Dict=7 , lowercase_ : Tuple=3 , lowercase_ : Tuple=30 , lowercase_ : Union[str, Any]=400 , lowercase_ : Optional[int]=True , lowercase_ : List[str]=None , lowercase_ : Optional[int]=True , lowercase_ : Tuple=[0.5, 0.5, 0.5] , lowercase_ : Any=[0.5, 0.5, 0.5] , lowercase_ : Any=True , lowercase_ : Union[str, Any]=1 / 255 , lowercase_ : List[Any]=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p lowercase_ : List[str] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} lowercase_ : Dict = parent lowercase_ : Optional[Any] = batch_size lowercase_ : Optional[int] = num_channels lowercase_ : Dict = min_resolution lowercase_ : int = max_resolution lowercase_ : Optional[int] = do_resize lowercase_ : Optional[int] = size lowercase_ : Optional[int] = do_normalize lowercase_ : Union[str, Any] = image_mean lowercase_ : List[Any] = image_std lowercase_ : str = do_rescale lowercase_ : List[Any] = rescale_factor lowercase_ : Optional[Any] = do_pad def SCREAMING_SNAKE_CASE_ ( self : str ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : int , lowercase_ : Dict=False ): if not batched: lowercase_ : Optional[int] = image_inputs[0] if isinstance(lowercase_ , Image.Image ): lowercase_ , lowercase_ : Optional[int] = image.size else: lowercase_ , lowercase_ : List[Any] = image.shape[1], image.shape[2] if w < h: lowercase_ : Union[str, Any] = int(self.size["""shortest_edge"""] * h / w ) lowercase_ : Any = self.size["""shortest_edge"""] elif w > h: lowercase_ : Optional[Any] = self.size["""shortest_edge"""] lowercase_ : Tuple = int(self.size["""shortest_edge"""] * w / h ) else: lowercase_ : Union[str, Any] = self.size["""shortest_edge"""] lowercase_ : str = self.size["""shortest_edge"""] else: lowercase_ : int = [] for image in image_inputs: lowercase_ , lowercase_ : Any = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowercase_ : Any = max(lowercase_ , key=lambda lowercase_ : item[0] )[0] lowercase_ : int = max(lowercase_ , key=lambda lowercase_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = DeformableDetrImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = DeformableDetrImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self : int ): return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) 
self.assertTrue(hasattr(lowercase_ , """image_mean""" ) ) self.assertTrue(hasattr(lowercase_ , """image_std""" ) ) self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) ) self.assertTrue(hasattr(lowercase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowercase_ , """do_rescale""" ) ) self.assertTrue(hasattr(lowercase_ , """do_pad""" ) ) self.assertTrue(hasattr(lowercase_ , """size""" ) ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} ) self.assertEqual(image_processor.do_pad , lowercase_ ) lowercase_ : Dict = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase_ ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): pass def SCREAMING_SNAKE_CASE_ ( self : Dict ): # Initialize image_processing lowercase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_ , Image.Image ) # Test not batched input lowercase_ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values lowercase_ , lowercase_ : List[str] = self.image_processor_tester.get_expected_values(lowercase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase_ , lowercase_ : str = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ ) lowercase_ : List[str] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): # Initialize image_processing lowercase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_ , np.ndarray ) # Test not batched input lowercase_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values lowercase_ , lowercase_ : Any = self.image_processor_tester.get_expected_values(lowercase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase_ : Dict = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values lowercase_ , lowercase_ : Optional[int] = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE_ ( self : Any ): # Initialize image_processing lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ ) for 
image in image_inputs: self.assertIsInstance(lowercase_ , torch.Tensor ) # Test not batched input lowercase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values lowercase_ , lowercase_ : Dict = self.image_processor_tester.get_expected_values(lowercase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase_ : Optional[int] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values lowercase_ , lowercase_ : int = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): # prepare image and target lowercase_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: lowercase_ : Tuple = json.loads(f.read() ) lowercase_ : Union[str, Any] = {"""image_id""": 39769, """annotations""": target} # encode them lowercase_ : Tuple = DeformableDetrImageProcessor() lowercase_ : Optional[Any] = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors="""pt""" ) # verify pixel values lowercase_ : Dict = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ ) lowercase_ : str = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1E-4 ) ) # verify area lowercase_ : Optional[Any] = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) ) # verify boxes lowercase_ : str = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ ) lowercase_ : List[Any] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1E-3 ) ) # verify image_id lowercase_ : Optional[int] = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) ) # verify is_crowd lowercase_ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) ) # verify class_labels lowercase_ : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) ) # verify orig_size lowercase_ : List[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) ) # verify size lowercase_ : Optional[Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) ) @slow def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): # prepare image, target and masks_path lowercase_ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: lowercase_ : Union[str, Any] = json.loads(f.read() ) lowercase_ : str = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target} lowercase_ : Union[str, Any] = 
pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them lowercase_ : Optional[Any] = DeformableDetrImageProcessor(format="""coco_panoptic""" ) lowercase_ : int = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors="""pt""" ) # verify pixel values lowercase_ : Optional[Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ ) lowercase_ : List[str] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1E-4 ) ) # verify area lowercase_ : List[Any] = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) ) # verify boxes lowercase_ : Optional[Any] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ ) lowercase_ : Union[str, Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1E-3 ) ) # verify image_id lowercase_ : Optional[Any] = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) ) # verify is_crowd lowercase_ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) ) # verify class_labels lowercase_ : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) ) # verify masks lowercase_ : List[str] = 822873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowercase_ ) # verify orig_size lowercase_ : int = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) ) # verify size lowercase_ : List[str] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
21
# Evaluate an expression in postfix (Reverse Polish) notation.
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
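For illustration, a minimal usage sketch of the evaluator restored above; the name evaluate_postfix is the de-obfuscated name assumed in this cleanup, not necessarily the sample's original identifier.

# Hypothetical usage of the postfix evaluator above.
tokens = ["2", "1", "+", "3", "*"]  # (2 + 1) * 3
print(evaluate_postfix(tokens))  # 9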
21
1
# Project Euler problem 135: count the values of n below one million for which
# x^2 - y^2 - z^2 = n, with x, y, z in arithmetic progression, has exactly ten solutions.
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            common_difference /= 4
            if (
                first_term > common_difference
                and first_term < 4 * common_difference
            ):  # since x, y, z are positive integers
                frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
21
'''simple docstring''' from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _lowercase : List[Any] = logging.get_logger(__name__) def lowerCamelCase ( UpperCAmelCase__ : Union[tf.Tensor, np.ndarray] ) -> List[int]: if isinstance(UpperCAmelCase__ , np.ndarray ): return list(tensor.shape ) lowercase_ : Tuple = tf.shape(UpperCAmelCase__ ) if tensor.shape == tf.TensorShape(UpperCAmelCase__ ): return dynamic lowercase_ : Dict = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase__ )] def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[str] = None ) -> tf.Tensor: return tf.nn.softmax(logits=logits + 1e-9 , axis=UpperCAmelCase__ , name=UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=1e-5 , UpperCAmelCase__ : List[str]=-1 ) -> List[str]: # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" ) # Get mean and variance on the axis to be normalized lowercase_ , lowercase_ : List[str] = tf.nn.moments(UpperCAmelCase__ , axes=[axis] , keepdims=UpperCAmelCase__ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowercase_ : List[Any] = [1] * inputs.shape.rank lowercase_ : List[str] = shape_list(UpperCAmelCase__ )[axis] lowercase_ : List[str] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : List[Any] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) # Compute layer normalization using the batch_normalization # function. lowercase_ : str = tf.nn.batch_normalization( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , offset=UpperCAmelCase__ , scale=UpperCAmelCase__ , variance_epsilon=UpperCAmelCase__ , ) return outputs def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Any=-1 ) -> Dict: # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowercase_ : List[Any] = tf.shape(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowercase_ : Dict = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor ) -> tf.Tensor: if not isinstance(UpperCAmelCase__ , tf.Tensor ): lowercase_ : List[Any] = tf.convert_to_tensor(UpperCAmelCase__ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowercase_ : Any = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowercase_ : List[Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowercase_ : Optional[Any] = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : int , UpperCAmelCase__ : str = "input_ids" ) -> None: tf.debugging.assert_less( UpperCAmelCase__ , tf.cast(UpperCAmelCase__ , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase__ )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Any: lowercase_ : int = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowercase_ : Optional[Any] = [x for x in data if len(UpperCAmelCase__ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( """The following attributes cannot be saved to HDF5 file because """ F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) lowercase_ : Any = np.asarray(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = chunk_data else: lowercase_ : Any = data def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ) -> str: if name in group.attrs: lowercase_ : Optional[Any] = [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs[name]] else: lowercase_ : int = [] lowercase_ : Optional[int] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] ) chunk_id += 1 return data def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> Any: def _expand_single_ad_tensor(UpperCAmelCase__ : Optional[Any] ): if isinstance(UpperCAmelCase__ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(UpperCAmelCase__ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase__ )
21
1
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def lowerCamelCase ( UpperCAmelCase__ : Any ) -> str: lowercase_ : List[Any] = filter(lambda UpperCAmelCase__ : p.requires_grad , model.parameters() ) lowercase_ : Optional[Any] = sum([np.prod(p.size() ) for p in model_parameters] ) return params _lowercase : Union[str, Any] = logging.getLogger(__name__) def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ) -> Tuple: if metric == "rouge2": lowercase_ : List[str] = """{val_avg_rouge2:.4f}-{step_count}""" elif metric == "bleu": lowercase_ : List[str] = """{val_avg_bleu:.4f}-{step_count}""" elif metric == "em": lowercase_ : int = """{val_avg_em:.4f}-{step_count}""" else: raise NotImplementedError( F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this''' """ function.""" ) lowercase_ : Dict = ModelCheckpoint( dirpath=UpperCAmelCase__ , filename=UpperCAmelCase__ , monitor=F'''val_{metric}''' , mode="""max""" , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] ) -> Optional[Any]: return EarlyStopping( monitor=F'''val_{metric}''' , mode="""min""" if """loss""" in metric else """max""" , patience=UpperCAmelCase__ , verbose=UpperCAmelCase__ , ) class __magic_name__ ( pl.Callback): def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Any , lowercase_ : Tuple ): lowercase_ : Union[str, Any] = {f'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(lowercase_ ) @rank_zero_only def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : pl.Trainer , lowercase_ : pl.LightningModule , lowercase_ : str , lowercase_ : Any=True ): logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' ) lowercase_ : Union[str, Any] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} ) # Log results lowercase_ : Optional[int] = Path(pl_module.hparams.output_dir ) if type_path == "test": lowercase_ : Optional[Any] = od / """test_results.txt""" lowercase_ : str = od / """test_generations.txt""" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
lowercase_ : Any = od / f'''{type_path}_results/{trainer.global_step:05d}.txt''' lowercase_ : Tuple = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt''' results_file.parent.mkdir(exist_ok=lowercase_ ) generations_file.parent.mkdir(exist_ok=lowercase_ ) with open(lowercase_ , """a+""" ) as writer: for key in sorted(lowercase_ ): if key in ["log", "progress_bar", "preds"]: continue lowercase_ : List[str] = metrics[key] if isinstance(lowercase_ , torch.Tensor ): lowercase_ : List[str] = val.item() lowercase_ : int = f'''{key}: {val:.6f}\n''' writer.write(lowercase_ ) if not save_generations: return if "preds" in metrics: lowercase_ : int = """\n""".join(metrics["""preds"""] ) generations_file.open("""w+""" ).write(lowercase_ ) @rank_zero_only def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Any , lowercase_ : Optional[Any] ): try: lowercase_ : List[Any] = pl_module.model.model.num_parameters() except AttributeError: lowercase_ : Union[str, Any] = pl_module.model.num_parameters() lowercase_ : Union[str, Any] = count_trainable_parameters(lowercase_ ) # mp stands for million parameters trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} ) @rank_zero_only def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : pl.Trainer , lowercase_ : pl.LightningModule ): save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(lowercase_ , lowercase_ , """test""" ) @rank_zero_only def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : pl.Trainer , lowercase_ : Optional[int] ): save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
21
# Mobius function: 1 for a square-free n with an even number of prime factors,
# -1 for a square-free n with an odd number, and 0 otherwise.
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
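A brief usage sketch for the function above, assuming the maths.is_square_free and maths.prime_factors helper modules from the same repository are importable:

# Hypothetical usage of the Mobius function above.
print(mobius(30))  # -1: 30 = 2 * 3 * 5 is square-free with three prime factors
print(mobius(12))  # 0: 12 = 2^2 * 3 is not square-free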
21
1
'''simple docstring''' import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : List[str] = """laion/clap-htsat-unfused""" lowercase_ : Dict = tempfile.mkdtemp() def SCREAMING_SNAKE_CASE_ ( self : List[Any] , **lowercase_ : Any ): return RobertaTokenizer.from_pretrained(self.checkpoint , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , **lowercase_ : str ): return ClapFeatureExtractor.from_pretrained(self.checkpoint , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Union[str, Any] = self.get_tokenizer() lowercase_ : Optional[Any] = self.get_feature_extractor() lowercase_ : str = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ ) processor.save_pretrained(self.tmpdirname ) lowercase_ : Tuple = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowercase_ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : str = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) lowercase_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowercase_ : Optional[int] = self.get_feature_extractor(do_normalize=lowercase_ , padding_value=1.0 ) lowercase_ : Optional[int] = ClapProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowercase_ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Dict = self.get_feature_extractor() lowercase_ : List[str] = self.get_tokenizer() lowercase_ : List[Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ ) lowercase_ : Optional[int] = floats_list((3, 1000) ) lowercase_ : int = feature_extractor(lowercase_ , return_tensors="""np""" ) lowercase_ : List[Any] = processor(audios=lowercase_ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : List[str] = self.get_feature_extractor() lowercase_ : Optional[int] = self.get_tokenizer() lowercase_ : List[Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ ) lowercase_ : Optional[Any] = """This is a test string""" lowercase_ : str = processor(text=lowercase_ ) lowercase_ : int = tokenizer(lowercase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def SCREAMING_SNAKE_CASE_ ( self : 
str ): lowercase_ : List[Any] = self.get_feature_extractor() lowercase_ : Dict = self.get_tokenizer() lowercase_ : Optional[Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ ) lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase_ : List[str] = processor.batch_decode(lowercase_ ) lowercase_ : Tuple = tokenizer.batch_decode(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = self.get_feature_extractor() lowercase_ : int = self.get_tokenizer() lowercase_ : Optional[Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
21
# Project Euler problem 135: count the values of n below one million for which
# x^2 - y^2 - z^2 = n, with x, y, z in arithmetic progression, has exactly ten solutions.
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            common_difference /= 4
            if (
                first_term > common_difference
                and first_term < 4 * common_difference
            ):  # since x, y, z are positive integers
                frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
21
1
# Lazy import structure for the BioGPT model.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
21
'''simple docstring''' import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class __magic_name__ ( unittest.TestCase): @parameterized.expand([(None,), ("""foo.json""",)] ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str ): lowercase_ : Union[str, Any] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ , config_name=lowercase_ ) lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_ ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , lowercase_ ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = AutoConfig.from_pretrained("""gpt2""" ) lowercase_ : List[Any] = GenerationConfig.from_model_config(lowercase_ ) lowercase_ : Optional[int] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(lowercase_ , lowercase_ ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = GenerationConfig() lowercase_ : int = { """max_new_tokens""": 1024, """foo""": """bar""", } lowercase_ : List[str] = copy.deepcopy(lowercase_ ) lowercase_ : Tuple = generation_config.update(**lowercase_ ) # update_kwargs was not modified (no side effects) self.assertEqual(lowercase_ , lowercase_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(lowercase_ , {"""foo""": """bar"""} ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Dict = GenerationConfig() lowercase_ : int = """bar""" with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir: generation_config.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , """bar""" ) lowercase_ : List[str] = GenerationConfig.from_model_config(lowercase_ ) assert not hasattr(lowercase_ , """foo""" ) # no new kwargs should be initialized if from config def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Optional[int] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , lowercase_ ) self.assertEqual(default_config.num_beams , 1 ) lowercase_ : Dict = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) 
self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , lowercase_ ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ ) lowercase_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , lowercase_ ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class __magic_name__ ( unittest.TestCase): @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any ): lowercase_ : int = TOKEN HfFolder.save_token(lowercase_ ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] ): try: delete_repo(token=cls._token , repo_id="""test-generation-config""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" ) except HTTPError: pass def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""test-generation-config""" , use_auth_token=self._token ) lowercase_ : List[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-generation-config""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="""test-generation-config""" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : int = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token ) lowercase_ : Optional[Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
21
1
# Apply Ohm's law (V = I * R): given two of voltage, current and resistance
# (the unknown one passed as 0), compute the third.
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
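A short usage sketch of the function above; the name ohms_law is the restored name assumed here.

# Hypothetical usage: solve for the missing quantity by passing it as 0.
print(ohms_law(voltage=10, current=2, resistance=0))  # {'resistance': 5.0}
print(ohms_law(voltage=0, current=2, resistance=5))   # {'voltage': 10.0}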
21
# Convert a TensorFlow Funnel Transformer checkpoint to a PyTorch model.
import argparse

import torch

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
21
1
# Optimal merge pattern: repeatedly merge the two smallest files; the cost of
# each merge is the sum of the two file sizes (greedy).
def optimal_merge_pattern(files: list) -> float:
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
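A small worked example for the greedy merge above; optimal_merge_pattern is the restored name assumed here.

# Hypothetical usage (note: the input list is consumed in place).
print(optimal_merge_pattern([2, 3, 4]))  # 14: merge 2+3 (cost 5), then 5+4 (cost 9)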
21
'''simple docstring''' import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _lowercase : Optional[List[str]] = None _lowercase : str = "<" if sys.byteorder == "little" else ">" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _lowercase : Optional[int] = [ np.dtype("|b1"), np.dtype("|u1"), np.dtype("<u2"), np.dtype(">u2"), np.dtype("<i2"), np.dtype(">i2"), np.dtype("<u4"), np.dtype(">u4"), np.dtype("<i4"), np.dtype(">i4"), np.dtype("<f4"), np.dtype(">f4"), np.dtype("<f8"), np.dtype(">f8"), ] @dataclass class __magic_name__ : UpperCamelCase__ = True UpperCamelCase__ = None # Automatically constructed UpperCamelCase__ = "PIL.Image.Image" UpperCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()}) UpperCamelCase__ = field(default='''Image''', init=_UpperCAmelCase, repr=_UpperCAmelCase) def __call__( self : Tuple ): return self.pa_type def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if isinstance(lowercase_ , lowercase_ ): lowercase_ : int = np.array(lowercase_ ) if isinstance(lowercase_ , lowercase_ ): return {"path": value, "bytes": None} elif isinstance(lowercase_ , lowercase_ ): return {"path": None, "bytes": value} elif isinstance(lowercase_ , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(lowercase_ ) elif isinstance(lowercase_ , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(lowercase_ ) elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("""path""" )} elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )} else: raise ValueError( f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : dict , lowercase_ : List[str]=None ): if not self.decode: raise RuntimeError("""Decoding is disabled for this feature. 
Please use Image(decode=True) instead.""" ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support decoding images, please install 'Pillow'.""" ) if token_per_repo_id is None: lowercase_ : Union[str, Any] = {} lowercase_ , lowercase_ : List[Any] = value["""path"""], value["""bytes"""] if bytes_ is None: if path is None: raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) else: if is_local_path(lowercase_ ): lowercase_ : int = PIL.Image.open(lowercase_ ) else: lowercase_ : str = path.split("""::""" )[-1] try: lowercase_ : Any = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["""repo_id"""] lowercase_ : Optional[Any] = token_per_repo_id.get(lowercase_ ) except ValueError: lowercase_ : str = None with xopen(lowercase_ , """rb""" , use_auth_token=lowercase_ ) as f: lowercase_ : Dict = BytesIO(f.read() ) lowercase_ : Optional[Any] = PIL.Image.open(bytes_ ) else: lowercase_ : Any = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def SCREAMING_SNAKE_CASE_ ( self : int ): from .features import Value return ( self if self.decode else { "bytes": Value("""binary""" ), "path": Value("""string""" ), } ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ): if pa.types.is_string(storage.type ): lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.binary() ) lowercase_ : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Any = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("""bytes""" ) >= 0: lowercase_ : Optional[int] = storage.field("""bytes""" ) else: lowercase_ : Optional[Any] = pa.array([None] * len(lowercase_ ) , type=pa.binary() ) if storage.type.get_field_index("""path""" ) >= 0: lowercase_ : Dict = storage.field("""path""" ) else: lowercase_ : int = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): lowercase_ : Optional[int] = pa.array( [encode_np_array(np.array(lowercase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) lowercase_ : Tuple = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Tuple = pa.StructArray.from_arrays( [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() ) return array_cast(lowercase_ , self.pa_type ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(lowercase_ : Optional[Any] ): with xopen(lowercase_ , """rb""" ) as f: lowercase_ : int = f.read() return bytes_ lowercase_ : Optional[Any] = pa.array( [ (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) lowercase_ : Any = pa.array( [os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , ) lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] 
, mask=bytes_array.is_null() ) return array_cast(lowercase_ , self.pa_type ) def lowerCamelCase ( ) -> List[str]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() lowercase_ : int = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> bytes: lowercase_ : Tuple = BytesIO() if image.format in list_image_compression_formats(): lowercase_ : int = image.format else: lowercase_ : int = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF""" image.save(UpperCAmelCase__ , format=UpperCAmelCase__ ) return buffer.getvalue() def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> dict: if hasattr(UpperCAmelCase__ , """filename""" ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )} def lowerCamelCase ( UpperCAmelCase__ : np.ndarray ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) lowercase_ : List[Any] = array.dtype lowercase_ : int = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER lowercase_ : Dict = dtype.kind lowercase_ : List[Any] = dtype.itemsize lowercase_ : Any = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: lowercase_ : int = np.dtype("""|u1""" ) if dtype_kind not in ["u", "i"]: raise TypeError( F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' ) if dtype is not dest_dtype: warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: lowercase_ : str = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: lowercase_ : str = dtype_byteorder + dtype_kind + str(UpperCAmelCase__ ) lowercase_ : Optional[Any] = np.dtype(UpperCAmelCase__ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( F'''Cannot convert dtype {dtype} to a valid image dtype. 
Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' ) lowercase_ : Optional[int] = PIL.Image.fromarray(array.astype(UpperCAmelCase__ ) ) return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )} def lowerCamelCase ( UpperCAmelCase__ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if objs: lowercase_ , lowercase_ : Dict = first_non_null_value(UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(UpperCAmelCase__ , np.ndarray ): lowercase_ : Union[str, Any] = no_op_if_value_is_null(UpperCAmelCase__ ) return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs] elif isinstance(UpperCAmelCase__ , PIL.Image.Image ): lowercase_ : int = no_op_if_value_is_null(UpperCAmelCase__ ) return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs] else: return objs else: return objs
21
1
# Project Euler problem 7: find the 10001st prime number.
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")
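A quick sanity check for the restored solution above:

# Hypothetical usage: the 6th prime is 13 (2, 3, 5, 7, 11, 13).
print(solution(6))  # 13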
21
# Render the Mandelbrot set, with either color-coded or black-and-white pixels.
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    # Return the relative distance (step / max_step) after which the complex
    # number given by this x-y pair diverges; members of the Mandelbrot set
    # do not diverge, so their distance is 1.
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x=-0.6, figure_center_y=-0.4, figure_width=0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding=False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
21
1
# Tests for GLPNImageProcessor.
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
import argparse

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import torch


def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)

    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    new_model = os.path.join(model_file_folder, "optimized_" + model_file_name)
    onnx.save(model, new_model)

    return new_model
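A minimal usage sketch for the deduplication helper above; the input path is illustrative, and the function names follow the reconstruction used here rather than a published API.

# Merge duplicate initializers in an exported ONNX file and save an
# "optimized_" copy next to it; the path below is illustrative.
optimized_path = remove_dup_initializers("exported/model.onnx")
print("optimized model written to:", optimized_path)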
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
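A quick usage sketch for the dynamic-programming function above, using the classic instance of this problem (costs are the prices of 1-day, 7-day, and 30-day passes):

# Two 1-day passes (days 1 and 20) plus one 7-day pass covering days 4-8
# give the optimal total of 2 + 7 + 2 = 11.
days = [1, 4, 6, 7, 8, 20]
costs = [2, 7, 15]
print(mincost_tickets(days, costs))  # 11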
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})

            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)

            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)

            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")

        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
import math
import time

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using union by rank; return True if a merge was performed."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the parent of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
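A short usage sketch for the DisjointSet class above; the initial set sizes are illustrative.

ds = DisjointSet([1, 1, 1])  # three singleton sets with ids 0, 1, 2
ds.merge(1, 2)               # union by rank: 1 joins 2
ds.merge(0, 2)               # 0 joins the combined set
print(ds.get_parent(0))      # 2 -- the shared representative
print(ds.max_set)            # 3 -- size of the largest set built so far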
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures


logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
from io import BytesIO
from typing import List, Union

import requests

from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_decord_available():
    import numpy as np
    from decord import VideoReader

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_flax_cross_test,
    require_flax,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available

from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester


if is_flax_available():
    from transformers import (
        FlaxBertModel,
        FlaxCLIPVisionModel,
        FlaxVisionTextDualEncoderModel,
        FlaxViTModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderProcessor,
    )
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_available():
    import torch

    from transformers import VisionTextDualEncoderModel

if is_vision_available():
    from PIL import Image


def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
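A quick interactive sketch of the round trip the first test exercises; `_to_yaml_list` and `_from_yaml_list` are private datasets helpers, so this is illustrative rather than a stable public API.

from datasets.splits import SplitDict, SplitInfo

sd = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
yaml_list = sd._to_yaml_list()  # a list of plain dicts, one per split
reloaded = SplitDict._from_yaml_list(yaml_list)
assert reloaded["train"].num_examples == 42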
import json
import os
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ImageGPTImageProcessor


class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
21
1
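The ImageGPT processor tested above carries a clusters palette because preprocessing maps every pixel to the id of its nearest color cluster. A minimal numpy sketch of that quantization step; the two-color palette here is made up, mirroring the test's toy clusters:

import numpy as np

clusters = np.array([[0.9, 0.7, 0.4], [-0.6, -0.02, 0.5]])  # hypothetical palette
pixels = np.random.uniform(-1, 1, size=(18 * 18, 3))        # flattened RGB image in [-1, 1]

# squared distance from every pixel to every cluster, then argmin -> token ids
distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
input_ids = distances.argmin(axis=1)                        # shape (324,), values in {0, 1}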
'''simple docstring''' def lowerCamelCase ( UpperCAmelCase__ : int ) -> bool: lowercase_ : str = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def lowerCamelCase ( UpperCAmelCase__ : int = 5000 ) -> int: lowercase_ : Optional[Any] = [(i * (3 * i - 1)) // 2 for i in range(1 , UpperCAmelCase__ )] for i, pentagonal_i in enumerate(UpperCAmelCase__ ): for j in range(UpperCAmelCase__ , len(UpperCAmelCase__ ) ): lowercase_ : List[str] = pentagonal_nums[j] lowercase_ : Union[str, Any] = pentagonal_i + pentagonal_j lowercase_ : Any = pentagonal_j - pentagonal_i if is_pentagonal(UpperCAmelCase__ ) and is_pentagonal(UpperCAmelCase__ ): return b return -1 if __name__ == "__main__": print(f"""{solution() = }""")
21
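The closed-form check above inverts P(k) = k(3k - 1)/2: n is pentagonal exactly when (1 + sqrt(24n + 1))/6 is an integer. A quick self-contained sanity check of that identity:

def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0

pentagonals = [k * (3 * k - 1) // 2 for k in range(1, 100)]  # 1, 5, 12, 22, 35, ...
assert all(is_pentagonal(p) for p in pentagonals)
assert not is_pentagonal(6)  # 6 is triangular, not pentagonal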
'''simple docstring''' def lowerCamelCase ( ) -> Dict: lowercase_ : Union[str, Any] = [] lowercase_ : Tuple = 1 while len(UpperCAmelCase__ ) < 1e6: constant.append(str(UpperCAmelCase__ ) ) i += 1 lowercase_ : int = """""".join(UpperCAmelCase__ ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[99] ) * int(constant[999] ) * int(constant[9999] ) * int(constant[99999] ) * int(constant[999999] ) ) if __name__ == "__main__": print(solution())
21
1
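Materializing a million digits as a list works, but the digit stream of the Champernowne constant can also be consumed lazily. A small sketch with itertools; the function name is illustrative:

from itertools import count, islice

def champernowne_digit(n: int) -> int:
    # n-th digit (1-indexed) of 0.123456789101112... without building the whole string
    digits = (ch for i in count(1) for ch in str(i))
    return int(next(islice(digits, n - 1, None)))

assert champernowne_digit(1) == 1
assert champernowne_digit(12) == 1  # digits run 1 2 3 4 5 6 7 8 9 1 0 1 ...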
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase _lowercase : Optional[int] = logging.get_logger(__name__) _lowercase : int = { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json", "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json", "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json" ), } class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''longformer''' def __init__( self : Dict , lowercase_ : Union[List[int], int] = 512 , lowercase_ : int = 2 , lowercase_ : int = 1 , lowercase_ : int = 0 , lowercase_ : int = 2 , lowercase_ : int = 30522 , lowercase_ : int = 768 , lowercase_ : int = 12 , lowercase_ : int = 12 , lowercase_ : int = 3072 , lowercase_ : str = "gelu" , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 512 , lowercase_ : int = 2 , lowercase_ : float = 0.02 , lowercase_ : float = 1E-12 , lowercase_ : bool = False , **lowercase_ : List[str] , ): super().__init__(pad_token_id=lowercase_ , **lowercase_ ) lowercase_ : List[Any] = attention_window lowercase_ : Dict = sep_token_id lowercase_ : str = bos_token_id lowercase_ : str = eos_token_id lowercase_ : Dict = vocab_size lowercase_ : Optional[int] = hidden_size lowercase_ : List[Any] = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Any = hidden_act lowercase_ : int = intermediate_size lowercase_ : Optional[int] = hidden_dropout_prob lowercase_ : str = attention_probs_dropout_prob lowercase_ : Optional[int] = max_position_embeddings lowercase_ : Dict = type_vocab_size lowercase_ : Tuple = initializer_range lowercase_ : Tuple = layer_norm_eps lowercase_ : Tuple = onnx_export class __magic_name__ ( _UpperCAmelCase): def __init__( self : Union[str, Any] , lowercase_ : "PretrainedConfig" , lowercase_ : str = "default" , lowercase_ : "List[PatchingSpec]" = None ): super().__init__(lowercase_ , lowercase_ , lowercase_ ) lowercase_ : Dict = True @property def SCREAMING_SNAKE_CASE_ ( self : Tuple ): if self.task == "multiple-choice": lowercase_ : int = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowercase_ : List[str] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""global_attention_mask""", dynamic_axis), ] ) @property def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : int = super().outputs if self.task == "default": lowercase_ : Optional[int] = {0: """batch"""} return outputs @property def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): return 1E-4 @property def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): # needs to be >= 14 to support tril operator return max(super().default_onnx_opset , 14 ) def SCREAMING_SNAKE_CASE_ ( self 
: Tuple , lowercase_ : "PreTrainedTokenizerBase" , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): lowercase_ : str = super().generate_dummy_inputs( preprocessor=lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly lowercase_ : int = torch.zeros_like(inputs["""input_ids"""] ) # make every second token global lowercase_ : List[Any] = 1 return inputs
21
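For illustration: the attention_window argument typed Union[List[int], int] above accepts either a single window size shared by all layers or one size per layer. A short usage sketch, assuming transformers is installed:

from transformers import LongformerConfig

config = LongformerConfig(attention_window=512)  # one window size for every layer
config = LongformerConfig(attention_window=[32] * 12, num_hidden_layers=12)  # per-layer windows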
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline _lowercase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class __magic_name__ ( _UpperCAmelCase): def __init__( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : str ): super().__init__() self.register_modules(unet=lowercase_ , scheduler=lowercase_ ) @torch.no_grad() def __call__( self : List[str] , lowercase_ : int = 1 , lowercase_ : int = 100 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[float] = None , lowercase_ : bool = True , ): if audio_length_in_s is None: lowercase_ : List[Any] = self.unet.config.sample_size / self.unet.config.sample_rate lowercase_ : Dict = audio_length_in_s * self.unet.config.sample_rate lowercase_ : Any = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to''' f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' ) lowercase_ : List[Any] = int(lowercase_ ) if sample_size % down_scale_factor != 0: lowercase_ : int = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled''' f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising''' """ process.""" ) lowercase_ : Any = int(lowercase_ ) lowercase_ : List[str] = next(iter(self.unet.parameters() ) ).dtype lowercase_ : List[str] = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowercase_ : Any = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ ) # set step values self.scheduler.set_timesteps(lowercase_ , device=audio.device ) lowercase_ : Optional[Any] = self.scheduler.timesteps.to(lowercase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowercase_ : Dict = self.unet(lowercase_ , lowercase_ ).sample # 2. compute previous image: x_t -> t_t-1 lowercase_ : List[str] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample lowercase_ : str = audio.clamp(-1 , 1 ).float().cpu().numpy() lowercase_ : Union[str, Any] = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=lowercase_ )
21
1
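The pipeline above rounds the requested audio length up to a multiple of 2**len(up_blocks) so each down/upsampling stage divides the sample count evenly. A standalone sketch of that rounding; names are illustrative:

def padded_sample_size(audio_length_in_s: float, sample_rate: int, num_up_blocks: int) -> int:
    down_scale_factor = 2 ** num_up_blocks
    sample_size = audio_length_in_s * sample_rate
    if sample_size % down_scale_factor != 0:
        # round up to the next multiple, as the pipeline does before denoising
        sample_size = (sample_size // down_scale_factor + 1) * down_scale_factor
    return int(sample_size)

assert padded_sample_size(1.0, 16000, 4) == 16000  # already a multiple of 16
assert padded_sample_size(1.0, 16001, 4) == 16016  # bumped to the next multiple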
'''simple docstring''' from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def lowerCamelCase ( UpperCAmelCase__ : int ) -> bool: lowercase_ : int = int(number**0.5 ) return number == sq * sq def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> tuple[int, int]: lowercase_ : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den lowercase_ : int = x_den * y_den * z_den lowercase_ : int = gcd(UpperCAmelCase__ , UpperCAmelCase__ ) top //= hcf bottom //= hcf return top, bottom def lowerCamelCase ( UpperCAmelCase__ : int = 35 ) -> int: lowercase_ : set = set() lowercase_ : int lowercase_ : Fraction = Fraction(0 ) lowercase_ : tuple[int, int] for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 lowercase_ : Union[str, Any] = x_num * y_den + x_den * y_num lowercase_ : int = x_den * y_den lowercase_ : List[Any] = gcd(UpperCAmelCase__ , UpperCAmelCase__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase_ : Optional[Any] = add_three( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) unique_s.add(UpperCAmelCase__ ) # n=2 lowercase_ : Any = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) lowercase_ : Union[str, Any] = x_den * x_den * y_den * y_den if is_sq(UpperCAmelCase__ ) and is_sq(UpperCAmelCase__ ): lowercase_ : Optional[Any] = int(sqrt(UpperCAmelCase__ ) ) lowercase_ : Dict = int(sqrt(UpperCAmelCase__ ) ) lowercase_ : Any = gcd(UpperCAmelCase__ , UpperCAmelCase__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase_ : Any = add_three( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) unique_s.add(UpperCAmelCase__ ) # n=-1 lowercase_ : List[str] = x_num * y_num lowercase_ : Optional[Any] = x_den * y_num + x_num * y_den lowercase_ : Dict = gcd(UpperCAmelCase__ , UpperCAmelCase__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase_ : List[str] = add_three( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) unique_s.add(UpperCAmelCase__ ) # n=2 lowercase_ : Optional[Any] = x_num * x_num * y_num * y_num lowercase_ : Optional[Any] = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(UpperCAmelCase__ ) and is_sq(UpperCAmelCase__ ): lowercase_ : List[str] = int(sqrt(UpperCAmelCase__ ) ) lowercase_ : Optional[int] = int(sqrt(UpperCAmelCase__ ) ) lowercase_ : Dict = gcd(UpperCAmelCase__ , UpperCAmelCase__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase_ : Union[str, Any] = add_three( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) unique_s.add(UpperCAmelCase__ ) for num, den in unique_s: total += Fraction(UpperCAmelCase__ , UpperCAmelCase__ ) return total.denominator + total.numerator if __name__ == "__main__": print(f"""{solution() = }""")
21
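The search above repeatedly adds fractions by cross-multiplying and reducing with the gcd. A minimal sketch of that single step, cross-checked against fractions.Fraction:

from fractions import Fraction
from math import gcd

def add_fractions(x_num, x_den, y_num, y_den):
    num = x_num * y_den + y_num * x_den
    den = x_den * y_den
    hcf = gcd(num, den)
    return num // hcf, den // hcf

assert add_fractions(1, 2, 1, 3) == (5, 6)
assert Fraction(*add_fractions(2, 4, 1, 6)) == Fraction(2, 4) + Fraction(1, 6)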
'''simple docstring''' import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py _lowercase : Union[str, Any] = "src/transformers" _lowercase : str = "docs/source/en" _lowercase : Union[str, Any] = "." def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> int: with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowercase_ : Union[str, Any] = f.readlines() # Find the start prompt. lowercase_ : Optional[Any] = 0 while not lines[start_index].startswith(UpperCAmelCase__ ): start_index += 1 start_index += 1 lowercase_ : int = start_index while not lines[end_index].startswith(UpperCAmelCase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | _lowercase : int = "Model|Encoder|Decoder|ForConditionalGeneration" # Regexes that match TF/Flax/PT model names. _lowercase : str = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") _lowercase : Optional[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch after the two previous regexes. _lowercase : int = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # This is to make sure the transformers module imported is the one in the repo. _lowercase : Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH) def lowerCamelCase ( UpperCAmelCase__ : int ) -> Any: lowercase_ : str = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , UpperCAmelCase__ ) return [m.group(0 ) for m in matches] def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> List[Any]: lowercase_ : Dict = 2 if text == """✅""" or text == """❌""" else len(UpperCAmelCase__ ) lowercase_ : List[str] = (width - text_length) // 2 lowercase_ : Dict = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def lowerCamelCase ( ) -> Any: lowercase_ : int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES lowercase_ : Any = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } lowercase_ : int = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. lowercase_ : List[Any] = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : List[str] = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Any = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Tuple = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Optional[int] = collections.defaultdict(UpperCAmelCase__ ) # Let's look through all transformers objects (once).
for attr_name in dir(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = None if attr_name.endswith("""Tokenizer""" ): lowercase_ : Optional[int] = slow_tokenizers lowercase_ : Union[str, Any] = attr_name[:-9] elif attr_name.endswith("""TokenizerFast""" ): lowercase_ : Optional[Any] = fast_tokenizers lowercase_ : Dict = attr_name[:-13] elif _re_tf_models.match(UpperCAmelCase__ ) is not None: lowercase_ : str = tf_models lowercase_ : str = _re_tf_models.match(UpperCAmelCase__ ).groups()[0] elif _re_flax_models.match(UpperCAmelCase__ ) is not None: lowercase_ : List[str] = flax_models lowercase_ : int = _re_flax_models.match(UpperCAmelCase__ ).groups()[0] elif _re_pt_models.match(UpperCAmelCase__ ) is not None: lowercase_ : Tuple = pt_models lowercase_ : Optional[int] = _re_pt_models.match(UpperCAmelCase__ ).groups()[0] if lookup_dict is not None: while len(UpperCAmelCase__ ) > 0: if attr_name in model_name_to_prefix.values(): lowercase_ : int = True break # Try again after removing the last word in the name lowercase_ : Optional[Any] = """""".join(camel_case_split(UpperCAmelCase__ )[:-1] ) # Let's build that table! lowercase_ : Dict = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) lowercase_ : Optional[Any] = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). lowercase_ : Union[str, Any] = [len(UpperCAmelCase__ ) + 2 for c in columns] lowercase_ : int = max([len(UpperCAmelCase__ ) for name in model_names] ) + 2 # Build the table per se lowercase_ : Tuple = """|""" + """|""".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for c, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + """|\n""" # Use ":-----:" format to center-align table cell texts table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n" lowercase_ : int = {True: """✅""", False: """❌"""} for name in model_names: lowercase_ : str = model_name_to_prefix[name] lowercase_ : Any = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for l, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + "|\n" return table def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any]=False ) -> str: lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = _find_text_in_file( filename=os.path.join(UpperCAmelCase__ , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , ) lowercase_ : Dict = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(UpperCAmelCase__ , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" ) if __name__ == "__main__": _lowercase : Any = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") _lowercase : Optional[Any] = parser.parse_args() check_model_table(args.fix_and_overwrite)
21
1
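The camel-case splitter above leans on a lazy match bounded by lookahead/lookbehind assertions. A quick demonstration of how that regex segments model class names:

import re

_re_camel = re.compile(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)")

def camel_case_split(identifier: str) -> list:
    return [m.group(0) for m in _re_camel.finditer(identifier)]

assert camel_case_split("SqueezeBertModel") == ["Squeeze", "Bert", "Model"]
assert camel_case_split("TFLongformerModel") == ["TF", "Longformer", "Model"]  # acronyms stay together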
'''simple docstring''' import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class __magic_name__ ( _UpperCAmelCase): def __init__( self : Optional[Any] , lowercase_ : Dict , lowercase_ : Union[str, Any]=13 , lowercase_ : List[str]=7 , lowercase_ : Any=True , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[int]=False , lowercase_ : List[str]=True , lowercase_ : int=99 , lowercase_ : Any=32 , lowercase_ : List[str]=5 , lowercase_ : List[str]=4 , lowercase_ : Optional[int]=64 , lowercase_ : Tuple="gelu" , lowercase_ : Dict=0.1 , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=512 , lowercase_ : List[Any]=16 , lowercase_ : List[Any]=2 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Any=3 , lowercase_ : List[Any]=4 , lowercase_ : List[str]=None , lowercase_ : str=2 , lowercase_ : Tuple=2 , lowercase_ : Optional[int]=2 , lowercase_ : Tuple=2 , lowercase_ : Optional[Any]=4 , lowercase_ : str=1 , ): lowercase_ : Union[str, Any] = parent lowercase_ : Optional[Any] = batch_size lowercase_ : List[str] = seq_length lowercase_ : Dict = is_training lowercase_ : Union[str, Any] = use_input_mask lowercase_ : Tuple = use_token_type_ids lowercase_ : Optional[int] = use_labels lowercase_ : List[Any] = vocab_size lowercase_ : Dict = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : Optional[Any] = num_attention_heads lowercase_ : Any = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : int = hidden_dropout_prob lowercase_ : Any = attention_probs_dropout_prob lowercase_ : Optional[int] = max_position_embeddings lowercase_ : str = type_vocab_size lowercase_ : Optional[Any] = type_sequence_label_size lowercase_ : Optional[int] = initializer_range lowercase_ : Optional[int] = num_labels lowercase_ : int = num_choices lowercase_ : List[str] = scope lowercase_ : Optional[Any] = q_groups lowercase_ : Union[str, Any] = k_groups lowercase_ : Any = v_groups lowercase_ : Optional[int] = post_attention_groups lowercase_ : Union[str, Any] = intermediate_groups lowercase_ : Optional[Any] = output_groups def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase_ : str = None if self.use_input_mask: lowercase_ : int = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Any = None lowercase_ : Optional[int] = None lowercase_ : Tuple = None if self.use_labels: lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase_ : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowercase_ : Union[str, Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): return SqueezeBertConfig( embedding_size=self.hidden_size , 
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : Union[str, Any] ): lowercase_ : int = SqueezeBertModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : int = model(lowercase_ , lowercase_ ) lowercase_ : List[Any] = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Optional[int] ): lowercase_ : Optional[Any] = SqueezeBertForMaskedLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Tuple = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Tuple ): lowercase_ : Dict = SqueezeBertForQuestionAnswering(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Union[str, Any] = model( lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : int ): lowercase_ : Optional[Any] = self.num_labels lowercase_ : Optional[Any] = SqueezeBertForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : List[str] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Dict , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : int , lowercase_ : Union[str, Any] ): lowercase_ : List[Any] = self.num_labels lowercase_ : int = SqueezeBertForTokenClassification(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Dict = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : int ): lowercase_ : Any = self.num_choices lowercase_ : Any = SqueezeBertForMultipleChoice(config=lowercase_ ) model.to(lowercase_ ) 
model.eval() lowercase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase_ : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase_ : Optional[Any] = model( lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : List[str] = self.prepare_config_and_inputs() ((lowercase_) , (lowercase_) , (lowercase_) , (lowercase_) , (lowercase_) , (lowercase_)) : Union[str, Any] = config_and_inputs lowercase_ : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) UpperCamelCase__ = ( { '''feature-extraction''': SqueezeBertModel, '''fill-mask''': SqueezeBertForMaskedLM, '''question-answering''': SqueezeBertForQuestionAnswering, '''text-classification''': SqueezeBertForSequenceClassification, '''token-classification''': SqueezeBertForTokenClassification, '''zero-shot''': SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Optional[int] = SqueezeBertModelTester(self ) lowercase_ : Union[str, Any] = ConfigTester(self , config_class=lowercase_ , dim=37 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Dict ): for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : int = SqueezeBertModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) @require_sentencepiece @require_tokenizers @require_torch class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : List[str] = SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" ) lowercase_ : str = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] ) lowercase_ : List[Any] = 
model(lowercase_ )[0] lowercase_ : Tuple = torch.Size((1, 3) ) self.assertEqual(output.shape , lowercase_ ) lowercase_ : Dict = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] ) self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-4 ) )
21
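The multiple-choice test above replicates each sequence once per answer choice with unsqueeze/expand. A small shape-only sketch of that tensor trick:

import torch

batch_size, num_choices, seq_length = 2, 4, 7
input_ids = torch.randint(0, 100, (batch_size, seq_length))

# (batch, seq) -> (batch, choices, seq): every choice slot sees the same tokens
mc_input_ids = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
assert mc_input_ids.shape == (batch_size, num_choices, seq_length)
assert torch.equal(mc_input_ids[0, 0], mc_input_ids[0, 3])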
'''simple docstring''' import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class __magic_name__ ( ctypes.Structure): # _fields is a specific attr expected by ctypes UpperCamelCase__ = [('''size''', ctypes.c_int), ('''visible''', ctypes.c_byte)] def lowerCamelCase ( ) -> List[Any]: if os.name == "nt": lowercase_ : List[Any] = CursorInfo() lowercase_ : int = ctypes.windll.kernel32.GetStdHandle(-11 ) ctypes.windll.kernel32.GetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) ) lowercase_ : List[str] = False ctypes.windll.kernel32.SetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) ) elif os.name == "posix": sys.stdout.write("""\033[?25l""" ) sys.stdout.flush() def lowerCamelCase ( ) -> str: if os.name == "nt": lowercase_ : int = CursorInfo() lowercase_ : Optional[Any] = ctypes.windll.kernel32.GetStdHandle(-11 ) ctypes.windll.kernel32.GetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) ) lowercase_ : Optional[int] = True ctypes.windll.kernel32.SetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) ) elif os.name == "posix": sys.stdout.write("""\033[?25h""" ) sys.stdout.flush() @contextmanager def lowerCamelCase ( ) -> Any: try: hide_cursor() yield finally: show_cursor()
21
1
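On POSIX terminals, the hide/show helpers above simply emit the DECTCEM escape sequences. A minimal usage sketch of that branch:

import sys
import time

sys.stdout.write("\033[?25l")  # hide the cursor (DECTCEM off)
sys.stdout.flush()
try:
    time.sleep(1)  # e.g. render a spinner here without a blinking cursor
finally:
    sys.stdout.write("\033[?25h")  # always show the cursor again (DECTCEM on)
    sys.stdout.flush()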
'''simple docstring''' from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass _lowercase : List[Any] = (3, 9, -11, 0, 7, 5, 1, -1) _lowercase : Tuple = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class __magic_name__ : UpperCamelCase__ = 42 UpperCamelCase__ = 42 class __magic_name__ : def __init__( self : Union[str, Any] , lowercase_ : Iterable[int] ): lowercase_ : Node | None = None for i in sorted(lowercase_ , reverse=lowercase_ ): lowercase_ : Optional[int] = Node(lowercase_ , self.head ) def __iter__( self : Optional[int] ): lowercase_ : Tuple = self.head while node: yield node.data lowercase_ : str = node.next_node def __len__( self : str ): return sum(1 for _ in self ) def __str__( self : List[str] ): return " -> ".join([str(lowercase_ ) for node in self] ) def lowerCamelCase ( UpperCAmelCase__ : SortedLinkedList , UpperCAmelCase__ : SortedLinkedList ) -> SortedLinkedList: return SortedLinkedList(list(UpperCAmelCase__ ) + list(UpperCAmelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() _lowercase : List[str] = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
21
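Note that merge_lists above concatenates both inputs and lets the constructor re-sort everything, which is O(n log n); for two already-sorted sequences a linear-time merge also works, e.g. with heapq:

from heapq import merge

a = sorted((3, 9, -11, 0, 7, 5, 1, -1))    # the odd test data, sorted
b = sorted((4, 6, 2, 0, 8, 10, 3, -2))     # the even test data, sorted
assert list(merge(a, b)) == sorted(a + b)  # same result in a single linear pass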
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm _lowercase : int = logging.get_logger(__name__) @dataclass class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self : Optional[Any] , **lowercase_ : int ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowercase_ : Optional[int] = deprecated_arg[3:] setattr(self , lowercase_ , not kwargs.pop(lowercase_ ) ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) lowercase_ : Tuple = kwargs.pop("""torchscript""" , self.torchscript ) lowercase_ : List[Any] = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics ) lowercase_ : List[Any] = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level ) super().__init__(**lowercase_ ) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Trace the models using torchscript'''}) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''}) UpperCamelCase__ = field( default='''O1''', metadata={ '''help''': ( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. ''' '''See details at https://nvidia.github.io/apex/amp.html''' ) }, ) @cached_property def SCREAMING_SNAKE_CASE_ ( self : Tuple ): requires_backends(self , ["""torch"""] ) logger.info("""PyTorch: setting up devices""" ) if not self.cuda: lowercase_ : Optional[Any] = torch.device("""cpu""" ) lowercase_ : Tuple = 0 elif is_torch_tpu_available(): lowercase_ : Optional[int] = xm.xla_device() lowercase_ : str = 0 else: lowercase_ : int = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) lowercase_ : str = torch.cuda.device_count() return device, n_gpu @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return is_torch_tpu_available() and self.tpu @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): requires_backends(self , ["""torch"""] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): requires_backends(self , ["""torch"""] ) return self._setup_devices[0] @property def SCREAMING_SNAKE_CASE_ ( self : int ): requires_backends(self , ["""torch"""] ) return self._setup_devices[1] @property def SCREAMING_SNAKE_CASE_ ( self : int ): return self.n_gpu > 0
21
1
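The constructor above translates deprecated no_* flags into their positive counterparts before delegating to the parent dataclass. A standalone sketch of that translation; the function name is illustrative:

def translate_deprecated(kwargs: dict, deprecated_args: list) -> dict:
    for deprecated_arg in deprecated_args:
        if deprecated_arg in kwargs:
            positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
            kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
    return kwargs

assert translate_deprecated({"no_cuda": True, "batch_size": 8}, ["no_cuda"]) == {"batch_size": 8, "cuda": False}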
'''simple docstring''' from manim import * class __magic_name__ ( _UpperCAmelCase): def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : List[Any] = Rectangle(height=0.5 , width=0.5 ) lowercase_ : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowercase_ : Dict = [mem.copy() for i in range(6 )] lowercase_ : Dict = [mem.copy() for i in range(6 )] lowercase_ : Union[str, Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 ) lowercase_ : int = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 ) lowercase_ : Optional[int] = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 ) lowercase_ : str = Text("""CPU""" , font_size=24 ) lowercase_ : int = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowercase_ ) lowercase_ : Union[str, Any] = [mem.copy() for i in range(4 )] lowercase_ : Tuple = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 ) lowercase_ : List[Any] = Text("""GPU""" , font_size=24 ) lowercase_ : int = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ ) gpu.move_to([-1, -1, 0] ) self.add(lowercase_ ) lowercase_ : Tuple = [mem.copy() for i in range(6 )] lowercase_ : List[Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 ) lowercase_ : Any = Text("""Model""" , font_size=24 ) lowercase_ : int = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ ) model.move_to([3, -1.0, 0] ) self.add(lowercase_ ) lowercase_ : Tuple = [] for i, rect in enumerate(lowercase_ ): rect.set_stroke(lowercase_ ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) lowercase_ : Optional[Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 ) self.add(lowercase_ ) cpu_targs.append(lowercase_ ) lowercase_ : Tuple = [mem.copy() for i in range(6 )] lowercase_ : Optional[Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 ) lowercase_ : int = Text("""Loaded Checkpoint""" , font_size=24 ) lowercase_ : str = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) lowercase_ : str = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowercase_ : Optional[int] = MarkupText( f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowercase_ , lowercase_ ) lowercase_ : List[str] = MarkupText( f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) lowercase_ : str = MarkupText( f'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowercase_ ) , Write(lowercase_ ) ) self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) ) lowercase_ : Optional[Any] = [] lowercase_ : Any = [] for i, rect in enumerate(lowercase_ ): lowercase_ : Tuple = fill.copy().set_fill(lowercase_ , opacity=0.7 ) 
target.move_to(lowercase_ ) first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) ) lowercase_ : Optional[int] = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) ) self.play(*lowercase_ ) self.play(*lowercase_ ) self.wait()
21
'''simple docstring''' from __future__ import annotations from typing import Any def lowerCamelCase ( UpperCAmelCase__ : list ) -> int: if not postfix_notation: return 0 lowercase_ : Any = {"""+""", """-""", """*""", """/"""} lowercase_ : list[Any] = [] for token in postfix_notation: if token in operations: lowercase_ , lowercase_ : Dict = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(UpperCAmelCase__ ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
21
1
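The division branch above (a // b + 1 when the signs differ and there is a remainder) implements truncation toward zero, like C division, rather than Python's floor division. A compact self-contained evaluator with the same semantics, for comparison:

def eval_postfix(tokens: list) -> int:
    stack = []
    for token in tokens:
        if token in {"+", "-", "*", "/"}:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                q = a // b
                if q < 0 and q * b != a:
                    q += 1  # adjust floor division to truncate toward zero
                stack.append(q)
        else:
            stack.append(int(token))
    return stack.pop()

assert eval_postfix("3 4 + 2 *".split()) == 14
assert eval_postfix("-7 2 /".split()) == -3  # plain floor division would give -4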
'''simple docstring''' from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase : Union[str, Any] = logging.get_logger(__name__) _lowercase : Dict = { "microsoft/xprophetnet-large-wiki100-cased": ( "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json" ), } class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''xlm-prophetnet''' UpperCamelCase__ = ['''past_key_values'''] UpperCamelCase__ = { '''num_attention_heads''': '''num_encoder_attention_heads''', } def __init__( self : List[Any] , lowercase_ : Optional[float] = 0.1 , lowercase_ : Optional[Union[str, Callable]] = "gelu" , lowercase_ : Optional[int] = 30522 , lowercase_ : Optional[int] = 1024 , lowercase_ : Optional[int] = 4096 , lowercase_ : Optional[int] = 12 , lowercase_ : Optional[int] = 16 , lowercase_ : Optional[int] = 4096 , lowercase_ : Optional[int] = 12 , lowercase_ : Optional[int] = 16 , lowercase_ : Optional[float] = 0.1 , lowercase_ : Optional[float] = 0.1 , lowercase_ : Optional[int] = 512 , lowercase_ : Optional[float] = 0.02 , lowercase_ : Optional[bool] = True , lowercase_ : Optional[bool] = True , lowercase_ : Optional[int] = 0 , lowercase_ : Optional[int] = 2 , lowercase_ : Optional[int] = 32 , lowercase_ : Optional[int] = 128 , lowercase_ : Optional[bool] = False , lowercase_ : Optional[float] = 0.0 , lowercase_ : Optional[bool] = True , lowercase_ : Optional[int] = 0 , lowercase_ : Optional[int] = 1 , lowercase_ : Optional[int] = 2 , **lowercase_ : List[Any] , ): lowercase_ : Dict = vocab_size lowercase_ : List[Any] = hidden_size lowercase_ : Optional[int] = encoder_ffn_dim lowercase_ : Any = num_encoder_layers lowercase_ : int = num_encoder_attention_heads lowercase_ : Tuple = decoder_ffn_dim lowercase_ : List[str] = num_decoder_layers lowercase_ : List[str] = num_decoder_attention_heads lowercase_ : List[Any] = max_position_embeddings lowercase_ : int = init_std # Normal(0, this parameter) lowercase_ : Tuple = activation_function # parameters for xlmprophetnet lowercase_ : Optional[Any] = ngram lowercase_ : str = num_buckets lowercase_ : str = relative_max_distance lowercase_ : Union[str, Any] = disable_ngram_loss lowercase_ : List[Any] = eps # 3 Types of Dropout lowercase_ : Optional[Any] = attention_dropout lowercase_ : str = activation_dropout lowercase_ : Any = dropout lowercase_ : Tuple = use_cache super().__init__( pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , add_cross_attention=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , ) @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] ): raise NotImplementedError( """This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and""" """ `num_decoder_layers`.""" )
21
'''simple docstring''' from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _lowercase : List[Any] = logging.get_logger(__name__) def lowerCamelCase ( UpperCAmelCase__ : Union[tf.Tensor, np.ndarray] ) -> List[int]: if isinstance(UpperCAmelCase__ , np.ndarray ): return list(tensor.shape ) lowercase_ : Tuple = tf.shape(UpperCAmelCase__ ) if tensor.shape == tf.TensorShape(UpperCAmelCase__ ): return dynamic lowercase_ : Dict = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase__ )] def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[str] = None ) -> tf.Tensor: return tf.nn.softmax(logits=logits + 1e-9 , axis=UpperCAmelCase__ , name=UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=1e-5 , UpperCAmelCase__ : List[str]=-1 ) -> List[str]: # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" ) # Get mean and variance on the axis to be normalized lowercase_ , lowercase_ : List[str] = tf.nn.moments(UpperCAmelCase__ , axes=[axis] , keepdims=UpperCAmelCase__ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowercase_ : List[Any] = [1] * inputs.shape.rank lowercase_ : List[str] = shape_list(UpperCAmelCase__ )[axis] lowercase_ : List[str] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : List[Any] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) # Compute layer normalization using the batch_normalization # function. lowercase_ : str = tf.nn.batch_normalization( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , offset=UpperCAmelCase__ , scale=UpperCAmelCase__ , variance_epsilon=UpperCAmelCase__ , ) return outputs def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Any=-1 ) -> Dict: # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowercase_ : List[Any] = tf.shape(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowercase_ : Dict = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor ) -> tf.Tensor: if not isinstance(UpperCAmelCase__ , tf.Tensor ): lowercase_ : List[Any] = tf.convert_to_tensor(UpperCAmelCase__ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowercase_ : Any = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowercase_ : List[Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowercase_ : Optional[Any] = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : int , UpperCAmelCase__ : str = "input_ids" ) -> None: tf.debugging.assert_less( UpperCAmelCase__ , tf.cast(UpperCAmelCase__ , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase__ )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Any: lowercase_ : int = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowercase_ : Optional[Any] = [x for x in data if len(UpperCAmelCase__ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( """The following attributes cannot be saved to HDF5 file because """ F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) lowercase_ : Any = np.asarray(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = chunk_data else: lowercase_ : Any = data def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ) -> str: if name in group.attrs: lowercase_ : Optional[Any] = [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs[name]] else: lowercase_ : int = [] lowercase_ : Optional[int] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] ) chunk_id += 1 return data def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> Any: def _expand_single_ad_tensor(UpperCAmelCase__ : Optional[Any] ): if isinstance(UpperCAmelCase__ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(UpperCAmelCase__ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase__ )
21
1
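The mask helper above turns a 0/1 attention mask into an additive mask whose masked positions hold the dtype's most negative value. A numpy sketch of the same arithmetic:

import numpy as np

pad_mask = np.array([[1, 1, 1, 0, 0]], dtype=np.float32)  # 1 = attend, 0 = masked
additive = (1.0 - pad_mask[:, None, None, :]) * np.finfo(np.float32).min
assert additive.shape == (1, 1, 1, 5)  # broadcastable over (batch, heads, query, key)
assert additive[0, 0, 0, 0] == 0.0     # kept positions contribute nothing
assert additive[0, 0, 0, -1] < -1e30   # masked positions are pushed toward -inf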
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Tuple ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ): lowercase_ : str = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : List[Any] = """sshleifer/tiny-gpt2""" lowercase_ : Dict = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , ) lowercase_ : List[str] = PyTorchBenchmark(lowercase_ ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Dict = """sgugger/tiny-distilbert-classification""" lowercase_ : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , only_pretrain_model=lowercase_ , ) lowercase_ : Optional[Any] = PyTorchBenchmark(lowercase_ ) lowercase_ : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Optional[Any] = """sshleifer/tiny-gpt2""" lowercase_ : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , torchscript=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , ) lowercase_ : List[str] = PyTorchBenchmark(lowercase_ ) lowercase_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Any = """sshleifer/tiny-gpt2""" lowercase_ : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , fpaa=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , ) lowercase_ : Dict = PyTorchBenchmark(lowercase_ ) lowercase_ : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Optional[Any] = """sshleifer/tiny-gpt2""" lowercase_ : Tuple = AutoConfig.from_pretrained(lowercase_ ) # set architectures equal to `None` lowercase_ : Union[str, Any] = None lowercase_ : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , ) lowercase_ : Any = PyTorchBenchmark(lowercase_ , configs=[config] ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) 
self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : int = """sshleifer/tiny-gpt2""" lowercase_ : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , ) lowercase_ : Tuple = PyTorchBenchmark(lowercase_ ) lowercase_ : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : List[Any] = """sshleifer/tiny-gpt2""" lowercase_ : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowercase_ , multi_process=lowercase_ , ) lowercase_ : Optional[Any] = PyTorchBenchmark(lowercase_ ) lowercase_ : str = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : str = """sshleifer/tiny-gpt2""" lowercase_ : List[Any] = AutoConfig.from_pretrained(lowercase_ ) lowercase_ : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , ) lowercase_ : Union[str, Any] = PyTorchBenchmark(lowercase_ , configs=[config] ) lowercase_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Union[str, Any] = """sshleifer/tinier_bart""" lowercase_ : Optional[Any] = AutoConfig.from_pretrained(lowercase_ ) lowercase_ : List[str] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , ) lowercase_ : Union[str, Any] = PyTorchBenchmark(lowercase_ , configs=[config] ) lowercase_ : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : str = """sshleifer/tiny-gpt2""" lowercase_ : Tuple = AutoConfig.from_pretrained(lowercase_ ) lowercase_ : Dict = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , ) lowercase_ : Tuple = PyTorchBenchmark(lowercase_ , configs=[config] ) lowercase_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = """sshleifer/tinier_bart""" lowercase_ : Any = AutoConfig.from_pretrained(lowercase_ ) lowercase_ : List[str] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , ) lowercase_ : str = PyTorchBenchmark(lowercase_ , configs=[config] ) lowercase_ : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE_ 
( self : List[Any] ): lowercase_ : Tuple = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : str = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , save_to_csv=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowercase_ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(lowercase_ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(lowercase_ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(lowercase_ , """train_time.csv""" ) , env_info_csv_file=os.path.join(lowercase_ , """env.csv""" ) , multi_process=lowercase_ , ) lowercase_ : Optional[Any] = PyTorchBenchmark(lowercase_ ) benchmark.run() self.assertTrue(Path(os.path.join(lowercase_ , """inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_ , """train_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_ , """inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_ , """train_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_ , """env.csv""" ) ).exists() ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : str = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(lowercase_ : Union[str, Any] ): self.assertTrue(hasattr(lowercase_ , """sequential""" ) ) self.assertTrue(hasattr(lowercase_ , """cumulative""" ) ) self.assertTrue(hasattr(lowercase_ , """current""" ) ) self.assertTrue(hasattr(lowercase_ , """total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowercase_ , """log.txt""" ) , log_print=lowercase_ , trace_memory_line_by_line=lowercase_ , multi_process=lowercase_ , ) lowercase_ : Optional[int] = PyTorchBenchmark(lowercase_ ) lowercase_ : Tuple = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(lowercase_ , """log.txt""" ) ).exists() )
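A minimal standalone sketch of running the benchmark outside the test harness, reusing the exact argument values the tests above pass (tiny model, one batch size, one sequence length, no multiprocessing); the result attributes are the ones the tests assert on.

# Sketch only: mirrors PyTorchBenchmarkArguments usage from the tests above.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()
print(results.time_inference_result)
print(results.memory_inference_result)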
21
"""Moebius function: mu(n) is 1 for square-free n with an even number of prime
factors, -1 for an odd number of prime factors, and 0 when n is not square-free."""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    factors = prime_factors(number)
    if is_square_free(factors):
        # square-free: the sign depends on the parity of the factor count
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
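A few spot checks of the number-theoretic contract, assuming the mobius() rewrite above and the helper signatures from maths.is_square_free / maths.prime_factors:

# mu(1) = 1 (empty factorization), mu(p) = -1 for a prime p,
# mu(p*q) = 1 for distinct primes, mu(n) = 0 when a square divides n.
assert mobius(1) == 1
assert mobius(2) == -1
assert mobius(6) == 1    # 2 * 3
assert mobius(4) == 0    # divisible by 2**2
assert mobius(30) == -1  # 2 * 3 * 5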
21
1
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL _lowercase : Any = logging.get_logger(__name__) def lowerCamelCase ( UpperCAmelCase__ : Dict ) -> List[List[ImageInput]]: if isinstance(UpperCAmelCase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(UpperCAmelCase__ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(UpperCAmelCase__ ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = ['''pixel_values'''] def __init__( self : Optional[Any] , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : List[str] , ): super().__init__(**lowercase_ ) lowercase_ : Optional[int] = size if size is not None else {"""shortest_edge""": 256} lowercase_ : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_ ) lowercase_ : List[Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} lowercase_ : List[str] = get_size_dict(lowercase_ , param_name="""crop_size""" ) lowercase_ : str = do_resize lowercase_ : Dict = size lowercase_ : Union[str, Any] = do_center_crop lowercase_ : int = crop_size lowercase_ : int = resample lowercase_ : int = do_rescale lowercase_ : str = rescale_factor lowercase_ : Tuple = offset lowercase_ : Any = do_normalize lowercase_ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowercase_ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[str] , ): lowercase_ : Union[str, Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ ) if "shortest_edge" in size: lowercase_ : str = get_resize_output_image_size(lowercase_ , size["""shortest_edge"""] , default_to_square=lowercase_ ) elif "height" in size and "width" in size: lowercase_ : List[Any] = (size["""height"""], size["""width"""]) else: raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}''' ) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ): lowercase_ : Optional[Any] = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' ) return center_crop(lowercase_ , size=(size["""height"""], size["""width"""]) , data_format=lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : bool = True , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Any , ): lowercase_ : List[str] = image.astype(np.floataa ) if offset: lowercase_ : Optional[int] = image - (scale / 2) return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[str] , ): return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ): if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) if offset and not do_rescale: raise ValueError("""For offset, do_rescale must also be set to True.""" ) # All transformations expect numpy arrays. 
lowercase_ : Dict = to_numpy_array(lowercase_ ) if do_resize: lowercase_ : Union[str, Any] = self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) if do_center_crop: lowercase_ : Union[str, Any] = self.center_crop(lowercase_ , size=lowercase_ ) if do_rescale: lowercase_ : Union[str, Any] = self.rescale(image=lowercase_ , scale=lowercase_ , offset=lowercase_ ) if do_normalize: lowercase_ : Tuple = self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) lowercase_ : str = to_channel_dimension_format(lowercase_ , lowercase_ ) return image def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Tuple , ): lowercase_ : int = do_resize if do_resize is not None else self.do_resize lowercase_ : Tuple = resample if resample is not None else self.resample lowercase_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase_ : Dict = do_rescale if do_rescale is not None else self.do_rescale lowercase_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase_ : Tuple = offset if offset is not None else self.offset lowercase_ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize lowercase_ : Optional[int] = image_mean if image_mean is not None else self.image_mean lowercase_ : List[str] = image_std if image_std is not None else self.image_std lowercase_ : Dict = size if size is not None else self.size lowercase_ : Any = get_size_dict(lowercase_ , default_to_square=lowercase_ ) lowercase_ : List[Any] = crop_size if crop_size is not None else self.crop_size lowercase_ : Optional[Any] = get_size_dict(lowercase_ , param_name="""crop_size""" ) if not valid_images(lowercase_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) lowercase_ : Union[str, Any] = make_batched(lowercase_ ) lowercase_ : Any = [ [ self._preprocess_image( image=lowercase_ , do_resize=lowercase_ , size=lowercase_ , resample=lowercase_ , do_center_crop=lowercase_ , crop_size=lowercase_ , do_rescale=lowercase_ , rescale_factor=lowercase_ , offset=lowercase_ , do_normalize=lowercase_ , image_mean=lowercase_ , image_std=lowercase_ , data_format=lowercase_ , ) for img in video ] for video in videos ] lowercase_ : List[str] = {"""pixel_values""": videos} return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
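A usage sketch for the video image processor defined above. The original class name was lost to obfuscation (it appears only as `__magic_name__`), so `VideoImageProcessor` below is a placeholder; the expected output shape assumes the defaults declared above (shortest edge 256, center crop 224x224) and one video of eight channels-last frames.

import numpy as np

processor = VideoImageProcessor()  # placeholder name for the class defined above
video = [np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8) for _ in range(8)]
batch = processor(video, return_tensors="np")
print(batch["pixel_values"].shape)  # expected: (1, 8, 3, 224, 224)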
21
"""Count n below the limit for which x**2 - y**2 - z**2 == n has exactly ten
solutions with x, y, z a decreasing arithmetic progression of positive integers.

With x = y + d and z = y - d the expression collapses to
n = 4*y*d - y**2 = y * (4*d - y), so for each middle term y only the
multiples n of y need to be visited, with d recovered as (y + n / y) / 4."""


def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # x, y, z positive integers: z > 0 needs y > d, and n > 0 needs y < 4d
                    frequency[n] += 1
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
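The identity behind the inner loop can be sanity-checked by brute force for a small limit; this sketch enumerates the progressions directly (the function name and limit are illustrative):

# Direct enumeration of x = y + d, z = y - d with z > 0. Since
# x**2 - y**2 - z**2 == y * (4*d - y), this reproduces the frequency table
# built by solution() for small limits and serves as a cross-check.
def brute_force_frequency(limit: int = 100) -> list:
    freq = [0] * (limit + 1)
    for y in range(1, limit + 1):
        for d in range(1, y):  # z = y - d must stay positive
            z = y - d
            n = (y + d) ** 2 - y * y - z * z  # == y * (4 * d - y)
            if 0 < n <= limit:
                freq[n] += 1
    return freq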
21
1
"""Return the biggest number obtainable by removing exactly one digit."""


def remove_digit(num: int) -> int:
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    # one copy of the digit list per position, each with that position removed
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
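Illustrative checks for the digit-removal helper above (values verified by hand):

assert remove_digit(152) == 52    # candidates: 52, 12, 15
assert remove_digit(6385) == 685  # best move is dropping the 3
assert remove_digit(-11) == 1     # the sign is discarded via abs()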
21
'''simple docstring''' import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class __magic_name__ ( unittest.TestCase): @parameterized.expand([(None,), ("""foo.json""",)] ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str ): lowercase_ : Union[str, Any] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ , config_name=lowercase_ ) lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_ ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , lowercase_ ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = AutoConfig.from_pretrained("""gpt2""" ) lowercase_ : List[Any] = GenerationConfig.from_model_config(lowercase_ ) lowercase_ : Optional[int] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(lowercase_ , lowercase_ ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = GenerationConfig() lowercase_ : int = { """max_new_tokens""": 1024, """foo""": """bar""", } lowercase_ : List[str] = copy.deepcopy(lowercase_ ) lowercase_ : Tuple = generation_config.update(**lowercase_ ) # update_kwargs was not modified (no side effects) self.assertEqual(lowercase_ , lowercase_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(lowercase_ , {"""foo""": """bar"""} ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Dict = GenerationConfig() lowercase_ : int = """bar""" with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir: generation_config.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , """bar""" ) lowercase_ : List[str] = GenerationConfig.from_model_config(lowercase_ ) assert not hasattr(lowercase_ , """foo""" ) # no new kwargs should be initialized if from config def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Optional[int] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , lowercase_ ) self.assertEqual(default_config.num_beams , 1 ) lowercase_ : Dict = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) 
self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , lowercase_ ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ ) lowercase_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , lowercase_ ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class __magic_name__ ( unittest.TestCase): @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any ): lowercase_ : int = TOKEN HfFolder.save_token(lowercase_ ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] ): try: delete_repo(token=cls._token , repo_id="""test-generation-config""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" ) except HTTPError: pass def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""test-generation-config""" , use_auth_token=self._token ) lowercase_ : List[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-generation-config""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="""test-generation-config""" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : int = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token ) lowercase_ : Optional[Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
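A compact sketch of the update() contract these tests exercise; the unknown-kwarg name is illustrative and taken from the test itself:

from transformers import GenerationConfig

cfg = GenerationConfig()
unused = cfg.update(max_new_tokens=1024, foo="bar")
assert cfg.max_new_tokens == 1024     # valid attributes are applied in place
assert unused == {"foo": "bar"}       # unknown keys are returned, not applied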
21
1
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def lowerCamelCase ( UpperCAmelCase__ : Optional[int] ) -> List[str]: if isinstance(UpperCAmelCase__ , collections.abc.Iterable ): return x return (x, x) @require_flax class __magic_name__ : def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any , lowercase_ : str ): pass def SCREAMING_SNAKE_CASE_ ( self : str ): pass def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): pass def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float ): lowercase_ : Optional[Any] = np.abs((a - b) ).max() self.assertLessEqual(lowercase_ , lowercase_ , f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Tuple=None , **lowercase_ : Optional[int] ): lowercase_ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : Any = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : List[Any]=None , **lowercase_ : Tuple ): lowercase_ , lowercase_ : Any = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Optional[int] = {"""vision_model""": vision_model, """text_model""": text_model} lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=None , **lowercase_ : int ): lowercase_ , lowercase_ : Union[str, Any] = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Optional[Any] = {"""vision_model""": vision_model, 
"""text_model""": text_model} lowercase_ : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : Tuple = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) lowercase_ : Any = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ ) lowercase_ : List[str] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) lowercase_ : Union[str, Any] = after_output[0] lowercase_ : str = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Dict=None , **lowercase_ : Optional[Any] ): lowercase_ , lowercase_ : Optional[int] = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Dict = {"""vision_model""": vision_model, """text_model""": text_model} lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : Optional[int] = model( input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ ) lowercase_ : Tuple = output.vision_model_output.attentions self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowercase_ : List[str] = to_atuple(vision_model.config.image_size ) lowercase_ : Optional[Any] = to_atuple(vision_model.config.patch_size ) lowercase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowercase_ : Optional[Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowercase_ : Union[str, Any] = output.text_model_output.attentions self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int ): pt_model.to(lowercase_ ) pt_model.eval() # prepare inputs lowercase_ : int = inputs_dict lowercase_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): lowercase_ : str = pt_model(**lowercase_ ).to_tuple() lowercase_ : Optional[Any] = fx_model(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowercase_ ) lowercase_ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ , from_pt=lowercase_ ) lowercase_ : Dict = fx_model_loaded(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowercase_ ) lowercase_ : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(lowercase_ , from_flax=lowercase_ 
) pt_model_loaded.to(lowercase_ ) pt_model_loaded.eval() with torch.no_grad(): lowercase_ : List[Any] = pt_model_loaded(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(lowercase_ , pt_output_loaded.numpy() , 4E-2 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Union[str, Any] ): lowercase_ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : List[Any] = VisionTextDualEncoderModel(lowercase_ ) lowercase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_ ) lowercase_ : Tuple = fx_state self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] ): lowercase_ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : int = VisionTextDualEncoderModel(lowercase_ ) lowercase_ : Dict = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : Optional[Any] = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params ) self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Tuple = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = self.prepare_config_and_inputs() self.check_save_load(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowercase_ ) @is_pt_flax_cross_test def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = self.prepare_config_and_inputs() lowercase_ : List[Any] = config_inputs_dict.pop("""vision_config""" ) lowercase_ : int = config_inputs_dict.pop("""text_config""" ) lowercase_ : Optional[int] = config_inputs_dict self.check_equivalence_pt_to_flax(lowercase_ , lowercase_ , lowercase_ ) self.check_equivalence_flax_to_pt(lowercase_ , lowercase_ , lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ , lowercase_ : str = self.get_pretrained_model_and_inputs() lowercase_ : Dict = model_a(**lowercase_ ) lowercase_ : str = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ ) lowercase_ : str = model_a(**lowercase_ ) lowercase_ : Union[str, Any] = after_outputs[0] lowercase_ : Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-5 ) @require_flax class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , ) lowercase_ : List[str] = 13 lowercase_ : Optional[Any] = floats_tensor( [ batch_size, 
model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowercase_ : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowercase_ : str = random_attention_mask([batch_size, 4] ) lowercase_ : List[str] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Tuple ): lowercase_ : Union[str, Any] = FlaxViTModel(lowercase_ ) lowercase_ : Dict = FlaxBertModel(lowercase_ ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Any = FlaxViTModelTester(self ) lowercase_ : Optional[Any] = FlaxBertModelTester(self ) lowercase_ : Dict = vit_model_tester.prepare_config_and_inputs() lowercase_ : Optional[Any] = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ : List[str] = vision_config_and_inputs lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , ) lowercase_ : List[str] = 13 lowercase_ : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowercase_ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowercase_ : Tuple = random_attention_mask([batch_size, 4] ) lowercase_ : Union[str, Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ): lowercase_ : Tuple = FlaxCLIPVisionModel(lowercase_ ) lowercase_ : Any = FlaxBertModel(lowercase_ ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Union[str, Any] = FlaxCLIPVisionModelTester(self ) lowercase_ : Tuple = FlaxBertModelTester(self ) lowercase_ : Union[str, Any] = clip_model_tester.prepare_config_and_inputs() lowercase_ : Any = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ : Optional[Any] = vision_config_and_inputs lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 ) lowercase_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) lowercase_ : Union[str, Any] = 
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) lowercase_ : Optional[int] = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=lowercase_ , padding=lowercase_ , return_tensors="""np""" ) lowercase_ : List[str] = model(**lowercase_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) lowercase_ : Optional[Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] ) self.assertTrue(np.allclose(outputs.logits_per_image , lowercase_ , atol=1E-3 ) )
21
"""Convert a Funnel Transformer TensorFlow checkpoint to PyTorch."""
import argparse

import torch

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
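Typical invocation of the converter; the paths are illustrative and the script filename is an assumption based on where such converters live in transformers:

# Example invocation (illustrative paths):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel/model.ckpt \
#       --config_file ./funnel/config.json \
#       --pytorch_dump_path ./funnel/pytorch_model.bin
# Add --base_model to export the encoder without the decoder blocks.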
21
1
'''simple docstring''' from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _lowercase : List[Any] = logging.get_logger(__name__) def lowerCamelCase ( UpperCAmelCase__ : Union[tf.Tensor, np.ndarray] ) -> List[int]: if isinstance(UpperCAmelCase__ , np.ndarray ): return list(tensor.shape ) lowercase_ : Tuple = tf.shape(UpperCAmelCase__ ) if tensor.shape == tf.TensorShape(UpperCAmelCase__ ): return dynamic lowercase_ : Dict = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase__ )] def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[str] = None ) -> tf.Tensor: return tf.nn.softmax(logits=logits + 1e-9 , axis=UpperCAmelCase__ , name=UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=1e-5 , UpperCAmelCase__ : List[str]=-1 ) -> List[str]: # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" ) # Get mean and variance on the axis to be normalized lowercase_ , lowercase_ : List[str] = tf.nn.moments(UpperCAmelCase__ , axes=[axis] , keepdims=UpperCAmelCase__ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowercase_ : List[Any] = [1] * inputs.shape.rank lowercase_ : List[str] = shape_list(UpperCAmelCase__ )[axis] lowercase_ : List[str] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : List[Any] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) # Compute layer normalization using the batch_normalization # function. lowercase_ : str = tf.nn.batch_normalization( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , offset=UpperCAmelCase__ , scale=UpperCAmelCase__ , variance_epsilon=UpperCAmelCase__ , ) return outputs def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Any=-1 ) -> Dict: # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowercase_ : List[Any] = tf.shape(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowercase_ : Dict = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor ) -> tf.Tensor: if not isinstance(UpperCAmelCase__ , tf.Tensor ): lowercase_ : List[Any] = tf.convert_to_tensor(UpperCAmelCase__ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowercase_ : Any = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowercase_ : List[Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowercase_ : Optional[Any] = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : int , UpperCAmelCase__ : str = "input_ids" ) -> None: tf.debugging.assert_less( UpperCAmelCase__ , tf.cast(UpperCAmelCase__ , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase__ )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Any: lowercase_ : int = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowercase_ : Optional[Any] = [x for x in data if len(UpperCAmelCase__ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( """The following attributes cannot be saved to HDF5 file because """ F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) lowercase_ : Any = np.asarray(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = chunk_data else: lowercase_ : Any = data def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ) -> str: if name in group.attrs: lowercase_ : Optional[Any] = [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs[name]] else: lowercase_ : int = [] lowercase_ : Optional[int] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] ) chunk_id += 1 return data def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> Any: def _expand_single_ad_tensor(UpperCAmelCase__ : Optional[Any] ): if isinstance(UpperCAmelCase__ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(UpperCAmelCase__ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase__ )
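A quick illustration of the two helpers above that are easy to show in isolation; the names shape_list and flatten are assumed from transformers.tf_utils (shape_list is referenced by name inside the file, the torch.flatten replica's name was lost to obfuscation):

import numpy as np
import tensorflow as tf

t = tf.zeros((2, 3, 4))
print(shape_list(t))                         # [2, 3, 4] — static dims come back as ints
print(shape_list(np.zeros((4, 1))))          # NumPy inputs are accepted too: [4, 1]
print(shape_list(flatten(t, start_dim=1)))   # [2, 12] — torch.flatten semantics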
21
'''simple docstring''' import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _lowercase : Optional[List[str]] = None _lowercase : str = "<" if sys.byteorder == "little" else ">" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _lowercase : Optional[int] = [ np.dtype("|b1"), np.dtype("|u1"), np.dtype("<u2"), np.dtype(">u2"), np.dtype("<i2"), np.dtype(">i2"), np.dtype("<u4"), np.dtype(">u4"), np.dtype("<i4"), np.dtype(">i4"), np.dtype("<f4"), np.dtype(">f4"), np.dtype("<f8"), np.dtype(">f8"), ] @dataclass class __magic_name__ : UpperCamelCase__ = True UpperCamelCase__ = None # Automatically constructed UpperCamelCase__ = "PIL.Image.Image" UpperCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()}) UpperCamelCase__ = field(default='''Image''', init=_UpperCAmelCase, repr=_UpperCAmelCase) def __call__( self : Tuple ): return self.pa_type def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if isinstance(lowercase_ , lowercase_ ): lowercase_ : int = np.array(lowercase_ ) if isinstance(lowercase_ , lowercase_ ): return {"path": value, "bytes": None} elif isinstance(lowercase_ , lowercase_ ): return {"path": None, "bytes": value} elif isinstance(lowercase_ , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(lowercase_ ) elif isinstance(lowercase_ , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(lowercase_ ) elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("""path""" )} elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )} else: raise ValueError( f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : dict , lowercase_ : List[str]=None ): if not self.decode: raise RuntimeError("""Decoding is disabled for this feature. 
Please use Image(decode=True) instead.""" ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support decoding images, please install 'Pillow'.""" ) if token_per_repo_id is None: lowercase_ : Union[str, Any] = {} lowercase_ , lowercase_ : List[Any] = value["""path"""], value["""bytes"""] if bytes_ is None: if path is None: raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) else: if is_local_path(lowercase_ ): lowercase_ : int = PIL.Image.open(lowercase_ ) else: lowercase_ : str = path.split("""::""" )[-1] try: lowercase_ : Any = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["""repo_id"""] lowercase_ : Optional[Any] = token_per_repo_id.get(lowercase_ ) except ValueError: lowercase_ : str = None with xopen(lowercase_ , """rb""" , use_auth_token=lowercase_ ) as f: lowercase_ : Dict = BytesIO(f.read() ) lowercase_ : Optional[Any] = PIL.Image.open(bytes_ ) else: lowercase_ : Any = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def SCREAMING_SNAKE_CASE_ ( self : int ): from .features import Value return ( self if self.decode else { "bytes": Value("""binary""" ), "path": Value("""string""" ), } ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ): if pa.types.is_string(storage.type ): lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.binary() ) lowercase_ : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Any = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("""bytes""" ) >= 0: lowercase_ : Optional[int] = storage.field("""bytes""" ) else: lowercase_ : Optional[Any] = pa.array([None] * len(lowercase_ ) , type=pa.binary() ) if storage.type.get_field_index("""path""" ) >= 0: lowercase_ : Dict = storage.field("""path""" ) else: lowercase_ : int = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): lowercase_ : Optional[int] = pa.array( [encode_np_array(np.array(lowercase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) lowercase_ : Tuple = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Tuple = pa.StructArray.from_arrays( [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() ) return array_cast(lowercase_ , self.pa_type ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(lowercase_ : Optional[Any] ): with xopen(lowercase_ , """rb""" ) as f: lowercase_ : int = f.read() return bytes_ lowercase_ : Optional[Any] = pa.array( [ (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) lowercase_ : Any = pa.array( [os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , ) lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] 
, mask=bytes_array.is_null() ) return array_cast(lowercase_ , self.pa_type ) def lowerCamelCase ( ) -> List[str]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() lowercase_ : int = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> bytes: lowercase_ : Tuple = BytesIO() if image.format in list_image_compression_formats(): lowercase_ : int = image.format else: lowercase_ : int = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF""" image.save(UpperCAmelCase__ , format=UpperCAmelCase__ ) return buffer.getvalue() def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> dict: if hasattr(UpperCAmelCase__ , """filename""" ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )} def lowerCamelCase ( UpperCAmelCase__ : np.ndarray ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) lowercase_ : List[Any] = array.dtype lowercase_ : int = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER lowercase_ : Dict = dtype.kind lowercase_ : List[Any] = dtype.itemsize lowercase_ : Any = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: lowercase_ : int = np.dtype("""|u1""" ) if dtype_kind not in ["u", "i"]: raise TypeError( F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' ) if dtype is not dest_dtype: warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: lowercase_ : str = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: lowercase_ : str = dtype_byteorder + dtype_kind + str(UpperCAmelCase__ ) lowercase_ : Optional[Any] = np.dtype(UpperCAmelCase__ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( F'''Cannot convert dtype {dtype} to a valid image dtype. 
Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' ) lowercase_ : Optional[int] = PIL.Image.fromarray(array.astype(UpperCAmelCase__ ) ) return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )} def lowerCamelCase ( UpperCAmelCase__ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if objs: lowercase_ , lowercase_ : Dict = first_non_null_value(UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(UpperCAmelCase__ , np.ndarray ): lowercase_ : Union[str, Any] = no_op_if_value_is_null(UpperCAmelCase__ ) return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs] elif isinstance(UpperCAmelCase__ , PIL.Image.Image ): lowercase_ : int = no_op_if_value_is_null(UpperCAmelCase__ ) return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs] else: return objs else: return objs
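A minimal end-to-end sketch of the Image feature defined above; the file path is illustrative:

from datasets import Dataset, Features, Image

features = Features({"image": Image()})
ds = Dataset.from_dict({"image": ["path/to/example.png"]}, features=features)
# encode_example stores {"path": ..., "bytes": None}; decode_example opens the
# file lazily, so accessing the column yields a PIL.Image.Image:
pil_image = ds[0]["image"]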
21
1
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
21
"""Render the Mandelbrot set as a PIL image, either colour-coded by escape
time or in plain black and white."""
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x=-0.6, figure_center_y=-0.4, figure_width=0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding=False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
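The commented variants at the bottom of the file, spelled out as a runnable sketch (the output filename is illustrative):

# zoomed-in colour render and a black-and-white render, as per the
# commented-out examples in __main__ above
img_zoom = get_image(figure_center_x=-0.6, figure_center_y=-0.4, figure_width=0.8)
img_bw = get_image(use_distance_color_coding=False)
img_zoom.save("mandelbrot_zoom.png")  # illustrative filename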
21
1
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
"""
Three solutions to the rod-cutting problem: naive recursion, top-down
(memoized) dynamic programming, and bottom-up dynamic programming.
"""


def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time naive recursion over all first-cut positions."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down) dynamic programming."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up) dynamic programming."""
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
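# Illustrative usage sketch (added; assumes the module above is saved as
# ``rod_cutting.py`` -- a hypothetical name). prices[i - 1] is the price of a
# rod piece of length i, so for n=4 the optimum below is two cuts of length 2.
from rod_cutting import bottom_up_cut_rod, naive_cut_rod_recursive, top_down_cut_rod

prices = [1, 5, 8, 9]  # best revenue for n=4 is 5 + 5 = 10
assert naive_cut_rod_recursive(4, prices) == 10
assert top_down_cut_rod(4, prices) == 10
assert bottom_up_cut_rod(4, prices) == 10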
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
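# Illustrative sketch (added): the ``_LazyModule`` pattern above defers the real
# imports until first attribute access, so importing the package itself stays cheap.
import transformers.models.herbert as herbert_pkg  # cheap: nothing heavy imported yet

tok_cls = herbert_pkg.HerbertTokenizer  # first access triggers the actual submodule import
print(tok_cls.__name__)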
'''simple docstring''' from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef import datasets _lowercase : int = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n" _lowercase : Optional[int] = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n" _lowercase : Union[str, Any] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n" def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> Tuple: return float((preds == labels).mean() ) def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any] ) -> Dict: lowercase_ : List[str] = simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : List[Any] = float(fa_score(y_true=UpperCAmelCase__ , y_pred=UpperCAmelCase__ ) ) return { "accuracy": acc, "f1": fa, } def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] ) -> List[Any]: lowercase_ : List[Any] = float(pearsonr(UpperCAmelCase__ , UpperCAmelCase__ )[0] ) lowercase_ : List[Any] = float(spearmanr(UpperCAmelCase__ , UpperCAmelCase__ )[0] ) return { "pearson": pearson_corr, "spearmanr": spearman_corr, } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class __magic_name__ ( datasets.Metric): def SCREAMING_SNAKE_CASE_ ( self : Tuple ): if self.config_name not in [ "sst2", "mnli", "mnli_mismatched", "mnli_matched", "cola", "stsb", "mrpc", "qqp", "qnli", 
"rte", "wnli", "hans", ]: raise KeyError( """You should supply a configuration name selected in """ """[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """ """\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ), """references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ), } ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , ) def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : str , lowercase_ : List[Any] ): if self.config_name == "cola": return {"matthews_correlation": matthews_corrcoef(lowercase_ , lowercase_ )} elif self.config_name == "stsb": return pearson_and_spearman(lowercase_ , lowercase_ ) elif self.config_name in ["mrpc", "qqp"]: return acc_and_fa(lowercase_ , lowercase_ ) elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]: return {"accuracy": simple_accuracy(lowercase_ , lowercase_ )} else: raise KeyError( """You should supply a configuration name selected in """ """[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """ """\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# flake8: noqa
# Lint as: python3

__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
'''simple docstring''' import os import numpy import onnx def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str ) -> Tuple: lowercase_ : Tuple = a.name lowercase_ : Tuple = b.name lowercase_ : Any = """""" lowercase_ : List[Any] = """""" lowercase_ : List[Any] = a == b lowercase_ : Union[str, Any] = name_a lowercase_ : Optional[Any] = name_b return res def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]: for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCAmelCase__ , UpperCAmelCase__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase__ , UpperCAmelCase__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> int: for n in graph_proto.node: _node_replace_input_with(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict ) -> List[str]: lowercase_ : int = list(model.graph.initializer ) lowercase_ : List[str] = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i lowercase_ : Optional[Any] = inits[i].name lowercase_ : List[str] = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : int ) -> List[str]: lowercase_ : Dict = os.path.dirname(UpperCAmelCase__ ) lowercase_ : Optional[Any] = os.path.basename(UpperCAmelCase__ ) lowercase_ : str = onnx.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) ) lowercase_ : List[Any] = list(model.graph.initializer ) lowercase_ : int = set() lowercase_ : int = {} lowercase_ : str = [] lowercase_ : int = 0 for i in range(len(UpperCAmelCase__ ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCAmelCase__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCAmelCase__ ) dup_set.add(UpperCAmelCase__ ) lowercase_ : Dict = inits[j].data_type lowercase_ : List[str] = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("""unexpected data type: """ , UpperCAmelCase__ ) total_reduced_size += mem_size lowercase_ : int = inits[i].name lowercase_ : List[str] = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCAmelCase__ ) else: lowercase_ : Optional[int] = [name_j] ind_to_replace.append((j, i) ) print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" ) lowercase_ : Tuple = sorted(UpperCAmelCase__ ) _remove_dup_initializers_from_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : Union[str, Any] = """optimized_""" + model_file_name lowercase_ : Optional[int] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) onnx.save(UpperCAmelCase__ , UpperCAmelCase__ ) return new_model
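# Illustrative sketch (added): driving the deduplication pass above on a saved
# ONNX model. The entry-point name ``remove_dup_initializers`` is an assumption
# inferred from what the final function does (its original name is obfuscated in
# this dump): it loads the model, merges duplicate initializer tensors, saves an
# ``optimized_`` copy next to the input, and returns that new path.
#
# optimized_path = remove_dup_initializers("exported/model.onnx")  # hypothetical path
# print("wrote", optimized_path)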
'''simple docstring''' # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib _lowercase : List[str] = get_logger() _lowercase : Optional[dict] = None class __magic_name__ ( TensorFormatter[Mapping, '''jax.Array''', Mapping]): def __init__( self : Union[str, Any] , lowercase_ : Optional[Any]=None , lowercase_ : Any=None , **lowercase_ : int ): super().__init__(features=lowercase_ ) import jax from jaxlib.xla_client import Device if isinstance(lowercase_ , lowercase_ ): raise ValueError( f'''Expected {device} to be a `str` not {type(lowercase_ )}, as `jaxlib.xla_extension.Device` ''' """is not serializable neither with `pickle` nor with `dill`. Instead you can surround """ """the device with `str()` to get its string identifier that will be internally mapped """ """to the actual `jaxlib.xla_extension.Device`.""" ) lowercase_ : List[str] = device if isinstance(lowercase_ , lowercase_ ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: lowercase_ : int = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( f'''Device with string identifier {self.device} not listed among the available ''' f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default ''' f'''device: {str(jax.devices()[0] )}.''' ) lowercase_ : Any = str(jax.devices()[0] ) lowercase_ : Optional[Any] = jnp_array_kwargs @staticmethod def SCREAMING_SNAKE_CASE_ ( ): import jax return {str(lowercase_ ): device for device in jax.devices()} def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Dict ): import jax import jax.numpy as jnp if isinstance(lowercase_ , lowercase_ ) and column: if all( isinstance(lowercase_ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(lowercase_ , axis=0 ) return column def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Union[str, Any] ): import jax import jax.numpy as jnp if isinstance(lowercase_ , (str, bytes, type(lowercase_ )) ): return value elif isinstance(lowercase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() lowercase_ : List[str] = {} if isinstance(lowercase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: lowercase_ : List[str] = {"""dtype""": jnp.intaa} else: lowercase_ : str = {"""dtype""": jnp.intaa} elif isinstance(lowercase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): lowercase_ : List[Any] = {"""dtype""": jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(lowercase_ , PIL.Image.Image ): lowercase_ : str = np.asarray(lowercase_ ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: lowercase_ : 
int = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(lowercase_ , **{**default_dtype, **self.jnp_array_kwargs} ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Tuple ): import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(lowercase_ , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(lowercase_ , """__array__""" ) and not isinstance(lowercase_ , jax.Array ): lowercase_ : int = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(lowercase_ , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(lowercase_ ) for substruct in data_struct] ) elif isinstance(lowercase_ , (list, tuple) ): return self._consolidate([self.recursive_tensorize(lowercase_ ) for substruct in data_struct] ) return self._tensorize(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : dict ): return map_nested(self._recursive_tensorize , lowercase_ , map_list=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : pa.Table ): lowercase_ : Union[str, Any] = self.numpy_arrow_extractor().extract_row(lowercase_ ) lowercase_ : Dict = self.python_features_decoder.decode_row(lowercase_ ) return self.recursive_tensorize(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : pa.Table ): lowercase_ : Optional[Any] = self.numpy_arrow_extractor().extract_column(lowercase_ ) lowercase_ : str = self.python_features_decoder.decode_column(lowercase_ , pa_table.column_names[0] ) lowercase_ : Any = self.recursive_tensorize(lowercase_ ) lowercase_ : int = self._consolidate(lowercase_ ) return column def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : pa.Table ): lowercase_ : List[Any] = self.numpy_arrow_extractor().extract_batch(lowercase_ ) lowercase_ : Tuple = self.python_features_decoder.decode_batch(lowercase_ ) lowercase_ : Optional[int] = self.recursive_tensorize(lowercase_ ) for column_name in batch: lowercase_ : Union[str, Any] = self._consolidate(batch[column_name] ) return batch
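# Illustrative usage sketch (added): the formatter above is what backs
# ``Dataset.with_format("jax")`` in the ``datasets`` library; the toy columns
# here are just examples.
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]})
ds = ds.with_format("jax")
print(type(ds[0]["x"]))  # a jax.Array, materialized through the formatter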
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING _lowercase : str = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase) class __magic_name__ ( _UpperCAmelCase): def __init__( self : str , *lowercase_ : Dict , **lowercase_ : List[Any] ): super().__init__(*lowercase_ , **lowercase_ ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : List[Any]=None , lowercase_ : Dict=None ): lowercase_ : Optional[Any] = {} lowercase_ : Tuple = {} if prompt is not None: lowercase_ : Tuple = prompt if generate_kwargs is not None: lowercase_ : List[str] = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: lowercase_ : List[Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) lowercase_ : str = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : List[Any] , lowercase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase_ : Optional[int] ): return super().__call__(lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[Any] , lowercase_ : Tuple=None ): lowercase_ : List[Any] = load_image(lowercase_ ) if prompt is not None: if not isinstance(lowercase_ , lowercase_ ): raise ValueError( f'''Received an invalid text input, got - {type(lowercase_ )} - but expected a single string. 
''' """Note also that one single text can be provided for conditional image to text generation.""" ) lowercase_ : List[Any] = self.model.config.model_type if model_type == "git": lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework ) lowercase_ : Union[str, Any] = self.tokenizer(text=lowercase_ , add_special_tokens=lowercase_ ).input_ids lowercase_ : int = [self.tokenizer.cls_token_id] + input_ids lowercase_ : List[Any] = torch.tensor(lowercase_ ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": lowercase_ : Union[str, Any] = self.image_processor(images=lowercase_ , header_text=lowercase_ , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework ) lowercase_ : List[str] = self.tokenizer(lowercase_ , return_tensors=self.framework ) model_inputs.update(lowercase_ ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: lowercase_ : List[str] = self.image_processor(images=lowercase_ , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: lowercase_ : str = None return model_inputs def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Dict , lowercase_ : Optional[Any]=None ): # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , lowercase_ ) and all(x is None for x in model_inputs["""input_ids"""] ) ): lowercase_ : Any = None if generate_kwargs is None: lowercase_ : Optional[Any] = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. lowercase_ : Dict = model_inputs.pop(self.model.main_input_name ) lowercase_ : Any = self.model.generate(lowercase_ , **lowercase_ , **lowercase_ ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[Any] ): lowercase_ : List[str] = [] for output_ids in model_outputs: lowercase_ : Union[str, Any] = { """generated_text""": self.tokenizer.decode( lowercase_ , skip_special_tokens=lowercase_ , ) } records.append(lowercase_ ) return records
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class __magic_name__ ( unittest.TestCase): def __init__( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Any=7 , lowercase_ : Tuple=3 , lowercase_ : str=30 , lowercase_ : Optional[Any]=400 , lowercase_ : Optional[Any]=True , lowercase_ : Optional[Any]=None , lowercase_ : Optional[int]=True , lowercase_ : int=[0.5, 0.5, 0.5] , lowercase_ : Tuple=[0.5, 0.5, 0.5] , lowercase_ : int=True , lowercase_ : List[str]=1 / 255 , lowercase_ : List[Any]=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p lowercase_ : List[str] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} lowercase_ : Any = parent lowercase_ : Optional[Any] = batch_size lowercase_ : int = num_channels lowercase_ : Any = min_resolution lowercase_ : Any = max_resolution lowercase_ : Optional[int] = do_resize lowercase_ : int = size lowercase_ : List[Any] = do_normalize lowercase_ : int = image_mean lowercase_ : Union[str, Any] = image_std lowercase_ : str = do_rescale lowercase_ : List[Any] = rescale_factor lowercase_ : Union[str, Any] = do_pad def SCREAMING_SNAKE_CASE_ ( self : str ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Optional[int] , lowercase_ : Union[str, Any]=False ): if not batched: lowercase_ : List[Any] = image_inputs[0] if isinstance(lowercase_ , Image.Image ): lowercase_ , lowercase_ : Dict = image.size else: lowercase_ , lowercase_ : List[Any] = image.shape[1], image.shape[2] if w < h: lowercase_ : str = int(self.size["""shortest_edge"""] * h / w ) lowercase_ : List[Any] = self.size["""shortest_edge"""] elif w > h: lowercase_ : List[str] = self.size["""shortest_edge"""] lowercase_ : int = int(self.size["""shortest_edge"""] * w / h ) else: lowercase_ : int = self.size["""shortest_edge"""] lowercase_ : str = self.size["""shortest_edge"""] else: lowercase_ : Dict = [] for image in image_inputs: lowercase_ , lowercase_ : Optional[int] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowercase_ : Union[str, Any] = max(lowercase_ , key=lambda lowercase_ : item[0] )[0] lowercase_ : Tuple = max(lowercase_ , key=lambda lowercase_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = DetaImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : str = DetaImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase_ , """image_mean""" ) ) 
self.assertTrue(hasattr(lowercase_ , """image_std""" ) ) self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) ) self.assertTrue(hasattr(lowercase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowercase_ , """do_rescale""" ) ) self.assertTrue(hasattr(lowercase_ , """do_pad""" ) ) self.assertTrue(hasattr(lowercase_ , """size""" ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} ) self.assertEqual(image_processor.do_pad , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): pass def SCREAMING_SNAKE_CASE_ ( self : Tuple ): # Initialize image_processing lowercase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_ , Image.Image ) # Test not batched input lowercase_ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values lowercase_ , lowercase_ : Dict = self.image_processor_tester.get_expected_values(lowercase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase_ , lowercase_ : Optional[int] = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ ) lowercase_ : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE_ ( self : int ): # Initialize image_processing lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_ , np.ndarray ) # Test not batched input lowercase_ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values lowercase_ , lowercase_ : str = self.image_processor_tester.get_expected_values(lowercase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase_ : List[str] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values lowercase_ , lowercase_ : int = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE_ ( self : str ): # Initialize image_processing lowercase_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_ , torch.Tensor ) # Test not batched input lowercase_ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values lowercase_ , lowercase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(lowercase_ ) self.assertEqual( encoded_images.shape , (1, 
self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase_ : Optional[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values lowercase_ , lowercase_ : Tuple = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): # prepare image and target lowercase_ : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: lowercase_ : int = json.loads(f.read() ) lowercase_ : List[Any] = {"""image_id""": 39769, """annotations""": target} # encode them lowercase_ : Optional[Any] = DetaImageProcessor() lowercase_ : Tuple = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors="""pt""" ) # verify pixel values lowercase_ : Tuple = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ ) lowercase_ : Tuple = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1E-4 ) ) # verify area lowercase_ : str = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) ) # verify boxes lowercase_ : Union[str, Any] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ ) lowercase_ : Any = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1E-3 ) ) # verify image_id lowercase_ : Union[str, Any] = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) ) # verify is_crowd lowercase_ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) ) # verify class_labels lowercase_ : Optional[int] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) ) # verify orig_size lowercase_ : List[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) ) # verify size lowercase_ : List[str] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) ) @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): # prepare image, target and masks_path lowercase_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: lowercase_ : List[Any] = json.loads(f.read() ) lowercase_ : List[str] = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target} lowercase_ : List[str] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them lowercase_ : Tuple = DetaImageProcessor(format="""coco_panoptic""" ) lowercase_ : List[Any] = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors="""pt""" ) # verify pixel values lowercase_ : str = torch.Size([1, 3, 800, 1066] ) 
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ ) lowercase_ : Optional[Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1E-4 ) ) # verify area lowercase_ : Optional[Any] = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) ) # verify boxes lowercase_ : Dict = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ ) lowercase_ : List[Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1E-3 ) ) # verify image_id lowercase_ : Dict = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) ) # verify is_crowd lowercase_ : int = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) ) # verify class_labels lowercase_ : Any = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) ) # verify masks lowercase_ : Optional[Any] = 822873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowercase_ ) # verify orig_size lowercase_ : List[str] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) ) # verify size lowercase_ : Dict = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge the sets containing src and dst, using union by rank."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of disj_set, compressing the path as we go."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
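# Illustrative usage sketch (added) for the union-by-rank structure above.
sets = DisjointSet([1, 1, 1, 1])  # four singleton sets
sets.merge(0, 1)
sets.merge(2, 3)
sets.merge(0, 2)
print(sets.max_set)  # 4: all elements ended up in one set
print(sets.get_parent(0) == sets.get_parent(3))  # True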
'''simple docstring''' import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter _lowercase : Tuple = True except ImportError: _lowercase : int = False _lowercase : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCamelCase ( UpperCAmelCase__ : Namespace ) -> List[str]: return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class __magic_name__ ( _UpperCAmelCase): @staticmethod def SCREAMING_SNAKE_CASE_ ( lowercase_ : ArgumentParser ): lowercase_ : Union[str, Any] = parser.add_parser("""add-new-model""" ) add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" ) add_new_model_parser.add_argument("""--testing_file""" , type=lowercase_ , help="""Configuration file on which to run.""" ) add_new_model_parser.add_argument( """--path""" , type=lowercase_ , help="""Path to cookiecutter. Should only be used for testing purposes.""" ) add_new_model_parser.set_defaults(func=lowercase_ ) def __init__( self : List[Any] , lowercase_ : bool , lowercase_ : str , lowercase_ : int=None , *lowercase_ : str ): lowercase_ : Any = testing lowercase_ : Dict = testing_file lowercase_ : Dict = path def SCREAMING_SNAKE_CASE_ ( self : Any ): warnings.warn( """The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """ """It is not actively maintained anymore, so might give a result that won't pass all tests and quality """ """checks, you should use `transformers-cli add-new-model-like` instead.""" ) if not _has_cookiecutter: raise ImportError( """Model creation dependencies are required to use the `add_new_model` command. Install them by running """ """the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory lowercase_ : Optional[Any] = [directory for directory in os.listdir() if """cookiecutter-template-""" == directory[:22]] if len(lowercase_ ) > 0: raise ValueError( """Several directories starting with `cookiecutter-template-` in current working directory. 
""" """Please clean your directory by removing all folders starting with `cookiecutter-template-` or """ """change your working directory.""" ) lowercase_ : Optional[int] = ( Path(lowercase_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) lowercase_ : Optional[Any] = path_to_transformer_root / """templates""" / """adding_a_new_model""" # Execute cookiecutter if not self._testing: cookiecutter(str(lowercase_ ) ) else: with open(self._testing_file , """r""" ) as configuration_file: lowercase_ : List[Any] = json.load(lowercase_ ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowercase_ , extra_context=lowercase_ , ) lowercase_ : List[str] = [directory for directory in os.listdir() if """cookiecutter-template-""" in directory[:22]][0] # Retrieve configuration with open(directory + """/configuration.json""" , """r""" ) as configuration_file: lowercase_ : int = json.load(lowercase_ ) lowercase_ : Optional[int] = configuration["""lowercase_modelname"""] lowercase_ : List[Any] = configuration["""generate_tensorflow_pytorch_and_flax"""] os.remove(f'''{directory}/configuration.json''' ) lowercase_ : Union[str, Any] = """PyTorch""" in generate_tensorflow_pytorch_and_flax lowercase_ : Optional[Any] = """TensorFlow""" in generate_tensorflow_pytorch_and_flax lowercase_ : Union[str, Any] = """Flax""" in generate_tensorflow_pytorch_and_flax lowercase_ : Tuple = f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}''' os.makedirs(lowercase_ , exist_ok=lowercase_ ) os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=lowercase_ ) # Tests require submodules as they have parent imports with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , """w""" ): pass shutil.move( f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , ) shutil.move( f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , ) def remove_copy_lines(lowercase_ : Any ): with open(lowercase_ , """r""" ) as f: lowercase_ : str = f.readlines() with open(lowercase_ , """w""" ) as f: for line in lines: if "# Copied from transformers." 
not in line: f.write(lowercase_ ) if output_pytorch: if not self._testing: remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' ) shutil.move( f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , ) shutil.move( f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , ) else: os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' ) os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' ) if output_tensorflow: if not self._testing: remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' ) shutil.move( f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , ) shutil.move( f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , ) else: os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' ) os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' ) if output_flax: if not self._testing: remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' ) shutil.move( f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , ) shutil.move( f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , ) else: os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' ) os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' ) shutil.move( f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , ) shutil.move( f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , ) shutil.move( f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(lowercase_ : str , lowercase_ : str , lowercase_ : List[str] ): # Create temp file lowercase_ , lowercase_ : str = mkstemp() lowercase_ : Any = False with fdopen(lowercase_ , """w""" ) as new_file: with open(lowercase_ ) as old_file: for line in old_file: new_file.write(lowercase_ ) if line_to_copy_below in line: lowercase_ : List[str] = True for line_to_copy in lines_to_copy: new_file.write(lowercase_ ) if not line_found: raise ValueError(f'''Line {line_to_copy_below} was not found in file.''' ) # Copy the file permissions from the old file to the new file copymode(lowercase_ , lowercase_ ) # Remove original file remove(lowercase_ ) # Move new file move(lowercase_ , lowercase_ ) def skip_units(lowercase_ : List[Any] ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(lowercase_ : Any ): with open(lowercase_ ) as datafile: lowercase_ : int = [] lowercase_ : Union[str, Any] = False lowercase_ : Any = False for line in datafile: if "# To replace in: " in line and "##" not in line: lowercase_ : List[str] = line.split("""\"""" )[1] lowercase_ : str 
= skip_units(lowercase_ ) elif "# Below: " in line and "##" not in line: lowercase_ : List[str] = line.split("""\"""" )[1] lowercase_ : Optional[Any] = skip_units(lowercase_ ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(lowercase_ , lowercase_ , lowercase_ ) lowercase_ : List[Any] = [] elif "# Replace with" in line and "##" not in line: lowercase_ : str = [] elif "##" not in line: lines_to_copy.append(lowercase_ ) remove(lowercase_ ) replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''' ) os.rmdir(lowercase_ )
from io import BytesIO
from typing import List, Union

import requests

from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_decord_available():
    import numpy as np
    from decord import VideoReader


if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
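# Illustrative usage sketch (added): the class above backs the
# "video-classification" pipeline task. The checkpoint is an example, and the
# video path is a hypothetical local file.
from transformers import pipeline

classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
preds = classifier("archery.mp4", top_k=3)  # hypothetical local video file
print(preds)  # [{'score': ..., 'label': ...}, ...]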
'''simple docstring''' from sklearn.metrics import recall_score import datasets _lowercase : Any = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n" _lowercase : Tuple = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n" _lowercase : Union[str, Any] = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class __magic_name__ ( datasets.Metric): def SCREAMING_SNAKE_CASE_ ( self : int ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : str , lowercase_ : Any , lowercase_ : Tuple=None , lowercase_ : Optional[Any]=1 , lowercase_ : Any="binary" , lowercase_ : Optional[Any]=None , lowercase_ : Any="warn" , ): lowercase_ : Union[str, Any] = recall_score( lowercase_ , lowercase_ , labels=lowercase_ , pos_label=lowercase_ , average=lowercase_ , sample_weight=lowercase_ , zero_division=lowercase_ , ) return {"recall": float(lowercase_ ) if score.size == 1 else score}
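# Illustrative usage sketch (added), matching Example 1 from the metric's
# docstring above.
import datasets

recall_metric = datasets.load_metric("recall")
results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
print(results)  # {'recall': 0.6666666666666666}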
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def lowerCamelCase ( UpperCAmelCase__ : Optional[int] ) -> List[str]: if isinstance(UpperCAmelCase__ , collections.abc.Iterable ): return x return (x, x) @require_flax class __magic_name__ : def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any , lowercase_ : str ): pass def SCREAMING_SNAKE_CASE_ ( self : str ): pass def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): pass def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float ): lowercase_ : Optional[Any] = np.abs((a - b) ).max() self.assertLessEqual(lowercase_ , lowercase_ , f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Tuple=None , **lowercase_ : Optional[int] ): lowercase_ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : Any = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : List[Any]=None , **lowercase_ : Tuple ): lowercase_ , lowercase_ : Any = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Optional[int] = {"""vision_model""": vision_model, """text_model""": text_model} lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=None , **lowercase_ : int ): lowercase_ , lowercase_ : Union[str, Any] = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Optional[Any] = {"""vision_model""": vision_model, 
"""text_model""": text_model} lowercase_ : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : Tuple = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) lowercase_ : Any = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ ) lowercase_ : List[str] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) lowercase_ : Union[str, Any] = after_output[0] lowercase_ : str = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Dict=None , **lowercase_ : Optional[Any] ): lowercase_ , lowercase_ : Optional[int] = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Dict = {"""vision_model""": vision_model, """text_model""": text_model} lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : Optional[int] = model( input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ ) lowercase_ : Tuple = output.vision_model_output.attentions self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowercase_ : List[str] = to_atuple(vision_model.config.image_size ) lowercase_ : Optional[Any] = to_atuple(vision_model.config.patch_size ) lowercase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowercase_ : Optional[Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowercase_ : Union[str, Any] = output.text_model_output.attentions self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int ): pt_model.to(lowercase_ ) pt_model.eval() # prepare inputs lowercase_ : int = inputs_dict lowercase_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): lowercase_ : str = pt_model(**lowercase_ ).to_tuple() lowercase_ : Optional[Any] = fx_model(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowercase_ ) lowercase_ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ , from_pt=lowercase_ ) lowercase_ : Dict = fx_model_loaded(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowercase_ ) lowercase_ : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(lowercase_ , from_flax=lowercase_ 
) pt_model_loaded.to(lowercase_ ) pt_model_loaded.eval() with torch.no_grad(): lowercase_ : List[Any] = pt_model_loaded(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(lowercase_ , pt_output_loaded.numpy() , 4E-2 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Union[str, Any] ): lowercase_ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : List[Any] = VisionTextDualEncoderModel(lowercase_ ) lowercase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_ ) lowercase_ : Tuple = fx_state self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] ): lowercase_ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : int = VisionTextDualEncoderModel(lowercase_ ) lowercase_ : Dict = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : Optional[Any] = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params ) self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Tuple = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = self.prepare_config_and_inputs() self.check_save_load(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowercase_ ) @is_pt_flax_cross_test def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = self.prepare_config_and_inputs() lowercase_ : List[Any] = config_inputs_dict.pop("""vision_config""" ) lowercase_ : int = config_inputs_dict.pop("""text_config""" ) lowercase_ : Optional[int] = config_inputs_dict self.check_equivalence_pt_to_flax(lowercase_ , lowercase_ , lowercase_ ) self.check_equivalence_flax_to_pt(lowercase_ , lowercase_ , lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ , lowercase_ : str = self.get_pretrained_model_and_inputs() lowercase_ : Dict = model_a(**lowercase_ ) lowercase_ : str = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ ) lowercase_ : str = model_a(**lowercase_ ) lowercase_ : Union[str, Any] = after_outputs[0] lowercase_ : Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-5 ) @require_flax class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , ) lowercase_ : List[str] = 13 lowercase_ : Optional[Any] = floats_tensor( [ batch_size, 
model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowercase_ : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowercase_ : str = random_attention_mask([batch_size, 4] ) lowercase_ : List[str] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Tuple ): lowercase_ : Union[str, Any] = FlaxViTModel(lowercase_ ) lowercase_ : Dict = FlaxBertModel(lowercase_ ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Any = FlaxViTModelTester(self ) lowercase_ : Optional[Any] = FlaxBertModelTester(self ) lowercase_ : Dict = vit_model_tester.prepare_config_and_inputs() lowercase_ : Optional[Any] = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ : List[str] = vision_config_and_inputs lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , ) lowercase_ : List[str] = 13 lowercase_ : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowercase_ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowercase_ : Tuple = random_attention_mask([batch_size, 4] ) lowercase_ : Union[str, Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ): lowercase_ : Tuple = FlaxCLIPVisionModel(lowercase_ ) lowercase_ : Any = FlaxBertModel(lowercase_ ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Union[str, Any] = FlaxCLIPVisionModelTester(self ) lowercase_ : Tuple = FlaxBertModelTester(self ) lowercase_ : Union[str, Any] = clip_model_tester.prepare_config_and_inputs() lowercase_ : Any = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ : Optional[Any] = vision_config_and_inputs lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 ) lowercase_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) lowercase_ : Union[str, Any] = 
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) lowercase_ : Optional[int] = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=lowercase_ , padding=lowercase_ , return_tensors="""np""" ) lowercase_ : List[str] = model(**lowercase_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) lowercase_ : Optional[Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] ) self.assertTrue(np.allclose(outputs.logits_per_image , lowercase_ , atol=1E-3 ) )
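# Hedged sketch of the core API exercised by these tests: compose a dual
# encoder from separately pretrained vision and text backbones (reusing the
# tiny test checkpoints from the suite above).
from transformers import FlaxVisionTextDualEncoderModel

model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
    "hf-internal-testing/tiny-random-vit",
    "hf-internal-testing/tiny-bert",
    vision_from_pt=True,
    text_from_pt=True,
)
# model(input_ids=..., pixel_values=..., attention_mask=...) then yields
# text_embeds and image_embeds projected into the shared embedding space.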
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging _lowercase : Dict = logging.get_logger(__name__) _lowercase : Dict = { "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": ( "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''trajectory_transformer''' UpperCamelCase__ = ['''past_key_values'''] UpperCamelCase__ = { '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : int , lowercase_ : Tuple=100 , lowercase_ : Optional[int]=5 , lowercase_ : Optional[int]=1 , lowercase_ : int=1 , lowercase_ : Any=249 , lowercase_ : List[str]=6 , lowercase_ : List[Any]=17 , lowercase_ : Tuple=25 , lowercase_ : Any=4 , lowercase_ : int=4 , lowercase_ : Optional[Any]=128 , lowercase_ : Tuple=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Union[str, Any]=0.00_06 , lowercase_ : List[Any]=512 , lowercase_ : Tuple=0.02 , lowercase_ : List[str]=1E-12 , lowercase_ : Union[str, Any]=1 , lowercase_ : Optional[Any]=True , lowercase_ : Tuple=1 , lowercase_ : Optional[int]=50256 , lowercase_ : Union[str, Any]=50256 , **lowercase_ : int , ): lowercase_ : str = vocab_size lowercase_ : Union[str, Any] = action_weight lowercase_ : int = reward_weight lowercase_ : List[Any] = value_weight lowercase_ : Dict = max_position_embeddings lowercase_ : List[str] = block_size lowercase_ : Optional[Any] = action_dim lowercase_ : Any = observation_dim lowercase_ : Union[str, Any] = transition_dim lowercase_ : int = learning_rate lowercase_ : Tuple = n_layer lowercase_ : Optional[int] = n_head lowercase_ : Any = n_embd lowercase_ : str = embd_pdrop lowercase_ : int = attn_pdrop lowercase_ : int = resid_pdrop lowercase_ : Any = initializer_range lowercase_ : List[Any] = layer_norm_eps lowercase_ : int = kaiming_initializer_range lowercase_ : List[str] = use_cache super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
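# Hedged sketch: upstream this config class is `TrajectoryTransformerConfig`
# (assumed here, since the dump anonymizes class names). The keyword names
# mirror the __init__ arguments shown above.
from transformers import TrajectoryTransformerConfig

config = TrajectoryTransformerConfig(action_dim=6, observation_dim=17, n_layer=4, n_head=4)
print(config.model_type)  # "trajectory_transformer"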
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class __magic_name__ ( unittest.TestCase): def __init__( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : int=7 , lowercase_ : Optional[Any]=3 , lowercase_ : Optional[Any]=18 , lowercase_ : List[Any]=30 , lowercase_ : int=400 , lowercase_ : Dict=True , lowercase_ : List[Any]=None , lowercase_ : Dict=True , ): lowercase_ : Tuple = size if size is not None else {"""height""": 18, """width""": 18} lowercase_ : List[str] = parent lowercase_ : Any = batch_size lowercase_ : Optional[Any] = num_channels lowercase_ : Tuple = image_size lowercase_ : Optional[Any] = min_resolution lowercase_ : Dict = max_resolution lowercase_ : Optional[int] = do_resize lowercase_ : Optional[Any] = size lowercase_ : Union[str, Any] = do_normalize def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = ImageGPTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = ImageGPTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase_ , """clusters""" ) ) self.assertTrue(hasattr(lowercase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowercase_ , """size""" ) ) self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) lowercase_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : int = self.image_processing_class(**self.image_processor_dict ) lowercase_ : Union[str, Any] = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , obj[key] ) ) else: self.assertEqual(obj[key] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : str = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase_ : Union[str, Any] = os.path.join(lowercase_ , """image_processor.json""" ) image_processor_first.to_json_file(lowercase_ ) lowercase_ : Optional[Any] = self.image_processing_class.from_json_file(lowercase_ 
).to_dict() lowercase_ : Any = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(lowercase_ ) lowercase_ : Any = self.image_processing_class.from_pretrained(lowercase_ ).to_dict() lowercase_ : List[str] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowercase_ ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def SCREAMING_SNAKE_CASE_ ( self : Any ): pass def lowerCamelCase ( ) -> Any: lowercase_ : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" ) lowercase_ : Any = Image.open(dataset[4]["""file"""] ) lowercase_ : Dict = Image.open(dataset[5]["""file"""] ) lowercase_ : int = [imagea, imagea] return images @require_vision @require_torch class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Optional[Any] = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) lowercase_ : Optional[int] = prepare_images() # test non-batched lowercase_ : str = image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1024) ) lowercase_ : Tuple = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase_ ) # test batched lowercase_ : List[str] = image_processing(lowercase_ , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1024) ) lowercase_ : Union[str, Any] = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase_ )
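# Hedged sketch mirroring the slow test above: ImageGPT's processor maps an
# image to 1024 color-cluster ids, one per pixel of the 32x32 model input
# (Hub access assumed; the image path is a placeholder).
from PIL import Image
from transformers import ImageGPTImageProcessor

image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
encoding = image_processing(Image.open("image.png"), return_tensors="pt")
print(encoding.input_ids.shape)  # torch.Size([1, 1024])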
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING _lowercase : Tuple = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase) class __magic_name__ ( _UpperCAmelCase): def __init__( self : Dict , *lowercase_ : List[Any] , **lowercase_ : int ): super().__init__(*lowercase_ , **lowercase_ ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Optional[Any]=None ): lowercase_ : List[Any] = {} if top_k is not None: lowercase_ : str = top_k return {}, {}, postprocess_params def __call__( self : Union[str, Any] , lowercase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase_ : List[Any] ): return super().__call__(lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Tuple ): lowercase_ : Optional[int] = load_image(lowercase_ ) lowercase_ : List[str] = self.image_processor(images=lowercase_ , return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, Any] ): lowercase_ : Any = self.model(**lowercase_ ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int]=5 ): if top_k > self.model.config.num_labels: lowercase_ : List[str] = self.model.config.num_labels if self.framework == "pt": lowercase_ : Any = model_outputs.logits.softmax(-1 )[0] lowercase_ , lowercase_ : Dict = probs.topk(lowercase_ ) elif self.framework == "tf": lowercase_ : Optional[Any] = stable_softmax(model_outputs.logits , axis=-1 )[0] lowercase_ : List[str] = tf.math.top_k(lowercase_ , k=lowercase_ ) lowercase_ , lowercase_ : Optional[Any] = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) lowercase_ : Union[str, Any] = scores.tolist() lowercase_ : List[Any] = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
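# Hedged usage sketch for the pipeline defined above, via the high-level
# `pipeline` factory (the image path is a placeholder; a default checkpoint
# is downloaded from the Hub).
from transformers import pipeline

classifier = pipeline("image-classification")
preds = classifier("path/to/image.jpg", top_k=3)  # hypothetical local path
# preds: [{"score": ..., "label": ...}, ...] — exactly the postprocess output above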
'''simple docstring'''


def solution() -> int:
    # Concatenate the decimal representations of 1, 2, 3, ... to build the
    # digits of Champernowne's constant (the original loop appends 10**6
    # numbers, which yields comfortably more than 10**6 digits).
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    # Product of the digits at positions 1, 10, 100, ..., 1_000_000 (1-indexed).
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
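# Context note: this is Project Euler problem 40 (Champernowne's constant);
# the product of those seven digits is commonly reported as 210, so a quick
# self-check could be:
assert solution() == 210  # builds ~5.9 million digits first, so it takes a moment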
'''simple docstring''' import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Optional[int]: return x + 2 class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : List[str] = """x = 3""" lowercase_ : str = {} lowercase_ : List[str] = evaluate(lowercase_ , {} , state=lowercase_ ) assert result == 3 self.assertDictEqual(lowercase_ , {"""x""": 3} ) lowercase_ : Tuple = """x = y""" lowercase_ : str = {"""y""": 5} lowercase_ : Dict = evaluate(lowercase_ , {} , state=lowercase_ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(lowercase_ , {"""x""": 5, """y""": 5} ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : List[Any] = """y = add_two(x)""" lowercase_ : int = {"""x""": 3} lowercase_ : Optional[Any] = evaluate(lowercase_ , {"""add_two""": add_two} , state=lowercase_ ) assert result == 5 self.assertDictEqual(lowercase_ , {"""x""": 3, """y""": 5} ) # Won't work without the tool with CaptureStdout() as out: lowercase_ : Optional[Any] = evaluate(lowercase_ , {} , state=lowercase_ ) assert result is None assert "tried to execute add_two" in out.out def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Any = """x = 3""" lowercase_ : List[Any] = {} lowercase_ : Any = evaluate(lowercase_ , {} , state=lowercase_ ) assert result == 3 self.assertDictEqual(lowercase_ , {"""x""": 3} ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : str = """test_dict = {'x': x, 'y': add_two(x)}""" lowercase_ : Optional[int] = {"""x""": 3} lowercase_ : str = evaluate(lowercase_ , {"""add_two""": add_two} , state=lowercase_ ) self.assertDictEqual(lowercase_ , {"""x""": 3, """y""": 5} ) self.assertDictEqual(lowercase_ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Optional[int] = """x = 3\ny = 5""" lowercase_ : int = {} lowercase_ : List[str] = evaluate(lowercase_ , {} , state=lowercase_ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(lowercase_ , {"""x""": 3, """y""": 5} ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Optional[int] = """text = f'This is x: {x}.'""" lowercase_ : Optional[Any] = {"""x""": 3} lowercase_ : Optional[Any] = evaluate(lowercase_ , {} , state=lowercase_ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(lowercase_ , {"""x""": 3, """text""": """This is x: 3."""} ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : str = """if x <= 3:\n y = 2\nelse:\n y = 5""" lowercase_ : List[Any] = {"""x""": 3} lowercase_ : Tuple = evaluate(lowercase_ , {} , state=lowercase_ ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(lowercase_ , {"""x""": 3, """y""": 2} ) lowercase_ : Tuple = {"""x""": 8} lowercase_ : Tuple = evaluate(lowercase_ , {} , state=lowercase_ ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(lowercase_ , {"""x""": 8, """y""": 5} ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Optional[int] = """test_list = [x, add_two(x)]""" lowercase_ : Dict = {"""x""": 3} lowercase_ : Any = evaluate(lowercase_ , {"""add_two""": add_two} , state=lowercase_ ) self.assertListEqual(lowercase_ , [3, 5] ) self.assertDictEqual(lowercase_ , {"""x""": 3, """test_list""": [3, 5]} ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : str = """y = x""" lowercase_ : str = {"""x""": 3} lowercase_ : Dict = evaluate(lowercase_ , {} , state=lowercase_ ) assert result == 3 self.assertDictEqual(lowercase_ , {"""x""": 3, """y""": 3} ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Tuple = """test_list = [x, add_two(x)]\ntest_list[1]""" lowercase_ : Union[str, Any] = {"""x""": 3} lowercase_ : Any = evaluate(lowercase_ , {"""add_two""": add_two} , state=lowercase_ ) assert result == 5 self.assertDictEqual(lowercase_ , {"""x""": 3, """test_list""": [3, 5]} ) lowercase_ : Dict = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" lowercase_ : Optional[int] = {"""x""": 3} lowercase_ : Any = evaluate(lowercase_ , {"""add_two""": add_two} , state=lowercase_ ) assert result == 5 self.assertDictEqual(lowercase_ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[Any] = """x = 0\nfor i in range(3):\n x = i""" lowercase_ : Optional[int] = {} lowercase_ : List[Any] = evaluate(lowercase_ , {"""range""": range} , state=lowercase_ ) assert result == 2 self.assertDictEqual(lowercase_ , {"""x""": 2, """i""": 2} )
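# Hedged usage sketch of the interpreter under test: `evaluate` executes a
# restricted Python snippet against an explicit tool dict and a mutable state
# dict, returning the value of the last assignment (mirrors the tests above).
from transformers.tools.python_interpreter import evaluate

def add_two(x):
    return x + 2

state = {"x": 3}
result = evaluate("y = add_two(x)", {"add_two": add_two}, state=state)
print(result, state)  # 5 {'x': 3, 'y': 5}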
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline _lowercase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class __magic_name__ ( _UpperCAmelCase): def __init__( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : str ): super().__init__() self.register_modules(unet=lowercase_ , scheduler=lowercase_ ) @torch.no_grad() def __call__( self : List[str] , lowercase_ : int = 1 , lowercase_ : int = 100 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[float] = None , lowercase_ : bool = True , ): if audio_length_in_s is None: lowercase_ : List[Any] = self.unet.config.sample_size / self.unet.config.sample_rate lowercase_ : Dict = audio_length_in_s * self.unet.config.sample_rate lowercase_ : Any = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to''' f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' ) lowercase_ : List[Any] = int(lowercase_ ) if sample_size % down_scale_factor != 0: lowercase_ : int = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled''' f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising''' """ process.""" ) lowercase_ : Any = int(lowercase_ ) lowercase_ : List[str] = next(iter(self.unet.parameters() ) ).dtype lowercase_ : List[str] = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowercase_ : Any = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ ) # set step values self.scheduler.set_timesteps(lowercase_ , device=audio.device ) lowercase_ : Optional[Any] = self.scheduler.timesteps.to(lowercase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowercase_ : Dict = self.unet(lowercase_ , lowercase_ ).sample # 2. compute previous image: x_t -> t_t-1 lowercase_ : List[str] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample lowercase_ : str = audio.clamp(-1 , 1 ).float().cpu().numpy() lowercase_ : Union[str, Any] = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=lowercase_ )
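# Hedged usage sketch (upstream this pipeline is DanceDiffusionPipeline; the
# checkpoint name is an assumption):
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
audios = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios
# audios: numpy array of shape (batch, channels, samples), clamped to [-1, 1]
# and cut back to the requested length, as in the __call__ above.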
'''simple docstring''' from __future__ import annotations import time import numpy as np _lowercase : Union[str, Any] = [8, 5, 9, 7] _lowercase : Optional[int] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] _lowercase : Union[str, Any] = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class __magic_name__ : def __init__( self : int , lowercase_ : list[int] , lowercase_ : list[list[int]] , lowercase_ : list[list[int]] , ): lowercase_ : List[Any] = claim_vector lowercase_ : List[str] = allocated_resources_table lowercase_ : Optional[int] = maximum_claim_table def SCREAMING_SNAKE_CASE_ ( self : Any ): return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def SCREAMING_SNAKE_CASE_ ( self : Dict ): return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def SCREAMING_SNAKE_CASE_ ( self : str ): return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(lowercase_ ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): return {self.__need().index(lowercase_ ): i for i in self.__need()} def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : Dict ): lowercase_ : List[str] = self.__need() lowercase_ : Optional[int] = self.__allocated_resources_table lowercase_ : Dict = self.__available_resources() lowercase_ : str = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("""_""" * 50 + """\n""" ) while need_list: lowercase_ : Any = False for each_need in need_list: lowercase_ : Union[str, Any] = True for index, need in enumerate(lowercase_ ): if need > available_resources[index]: lowercase_ : Optional[int] = False break if execution: lowercase_ : List[Any] = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: lowercase_ : Any = original_need_index print(f'''Process {process_number + 1} is executing.''' ) # remove the process run from stack need_list.remove(lowercase_ ) # update available/freed resources stack lowercase_ : int = np.array(lowercase_ ) + np.array( alloc_resources_table[process_number] ) print( """Updated available resource stack for processes: """ + """ """.join([str(lowercase_ ) for x in available_resources] ) ) break if safe: print("""The process is in a safe state.\n""" ) else: print("""System in unsafe state. Aborting...\n""" ) break def SCREAMING_SNAKE_CASE_ ( self : int ): print(""" """ * 9 + """Allocated Resource Table""" ) for item in self.__allocated_resources_table: print( f'''P{self.__allocated_resources_table.index(lowercase_ ) + 1}''' + """ """.join(f'''{it:>8}''' for it in item ) + """\n""" ) print(""" """ * 9 + """System Resource Table""" ) for item in self.__maximum_claim_table: print( f'''P{self.__maximum_claim_table.index(lowercase_ ) + 1}''' + """ """.join(f'''{it:>8}''' for it in item ) + """\n""" ) print( """Current Usage by Active Processes: """ + """ """.join(str(lowercase_ ) for x in self.__claim_vector ) ) print( """Initial Available Resources: """ + """ """.join(str(lowercase_ ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
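# Hedged standalone sketch of the safety check the class above implements
# (function and variable names are mine; the logic is the textbook Banker's
# safety algorithm: repeatedly run any process whose remaining need fits in
# the free pool, then reclaim its allocation).
import numpy as np

def is_safe_state(available, allocation, maximum) -> bool:
    need = np.array(maximum) - np.array(allocation)
    work = np.array(available, dtype=int)
    finished = [False] * len(allocation)
    progressed = True
    while progressed:
        progressed = False
        for i, done in enumerate(finished):
            if not done and all(need[i] <= work):
                work += np.array(allocation[i])  # process i completes, frees its resources
                finished[i] = True
                progressed = True
    return all(finished)

claim = [8, 5, 9, 7]
alloc = [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]]
maxes = [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]]
free = np.array(claim) - np.array(alloc).sum(axis=0)  # resources not yet allocated
print(is_safe_state(free, alloc, maxes))  # expected: True for this sample data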
'''simple docstring''' import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py _lowercase : Union[str, Any] = "src/transformers" _lowercase : str = "docs/source/en" _lowercase : Union[str, Any] = "." def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> int: with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowercase_ : Union[str, Any] = f.readlines() # Find the start prompt. lowercase_ : Optional[Any] = 0 while not lines[start_index].startswith(UpperCAmelCase__ ): start_index += 1 start_index += 1 lowercase_ : int = start_index while not lines[end_index].startswith(UpperCAmelCase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | _lowercase : int = "Model|Encoder|Decoder|ForConditionalGeneration" # Regexes that match TF/Flax/PT model names. _lowercase : str = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") _lowercase : Optional[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. _lowercase : int = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # This is to make sure the transformers module imported is the one in the repo. _lowercase : Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH) def lowerCamelCase ( UpperCAmelCase__ : int ) -> Any: lowercase_ : str = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , UpperCAmelCase__ ) return [m.group(0 ) for m in matches] def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> List[Any]: lowercase_ : Dict = 2 if text == """✅""" or text == """❌""" else len(UpperCAmelCase__ ) lowercase_ : List[str] = (width - text_length) // 2 lowercase_ : Dict = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def lowerCamelCase ( ) -> Any: lowercase_ : int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES lowercase_ : Any = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } lowercase_ : int = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. lowercase_ : List[Any] = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : List[str] = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Any = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Tuple = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Optional[int] = collections.defaultdict(UpperCAmelCase__ ) # Let's lookup through all transformers object (once). 
for attr_name in dir(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = None if attr_name.endswith("""Tokenizer""" ): lowercase_ : Optional[int] = slow_tokenizers lowercase_ : Union[str, Any] = attr_name[:-9] elif attr_name.endswith("""TokenizerFast""" ): lowercase_ : Optional[Any] = fast_tokenizers lowercase_ : Dict = attr_name[:-13] elif _re_tf_models.match(UpperCAmelCase__ ) is not None: lowercase_ : str = tf_models lowercase_ : str = _re_tf_models.match(UpperCAmelCase__ ).groups()[0] elif _re_flax_models.match(UpperCAmelCase__ ) is not None: lowercase_ : List[str] = flax_models lowercase_ : int = _re_flax_models.match(UpperCAmelCase__ ).groups()[0] elif _re_pt_models.match(UpperCAmelCase__ ) is not None: lowercase_ : Tuple = pt_models lowercase_ : Optional[int] = _re_pt_models.match(UpperCAmelCase__ ).groups()[0] if lookup_dict is not None: while len(UpperCAmelCase__ ) > 0: if attr_name in model_name_to_prefix.values(): lowercase_ : int = True break # Try again after removing the last word in the name lowercase_ : Optional[Any] = """""".join(camel_case_split(UpperCAmelCase__ )[:-1] ) # Let's build that table! lowercase_ : Dict = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) lowercase_ : Optional[Any] = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). lowercase_ : Union[str, Any] = [len(UpperCAmelCase__ ) + 2 for c in columns] lowercase_ : int = max([len(UpperCAmelCase__ ) for name in model_names] ) + 2 # Build the table per se lowercase_ : Tuple = """|""" + """|""".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for c, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + """|\n""" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n" lowercase_ : int = {True: """✅""", False: """❌"""} for name in model_names: lowercase_ : str = model_name_to_prefix[name] lowercase_ : Any = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for l, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + "|\n" return table def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any]=False ) -> str: lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = _find_text_in_file( filename=os.path.join(UpperCAmelCase__ , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , ) lowercase_ : Dict = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(UpperCAmelCase__ , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" ) if __name__ == "__main__": _lowercase : Any = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") _lowercase : Optional[Any] = parser.parse_args() check_model_table(args.fix_and_overwrite)
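# Hedged mini-demo of the camel-case splitter used above: the regex cuts at
# lower->Upper and Upper->UpperLower boundaries.
import re

pattern = re.compile(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)")
print([m.group(0) for m in pattern.finditer("TFBertForQuestionAnswering")])
# -> ['TF', 'Bert', 'For', 'Question', 'Answering']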
'''simple docstring'''


def prime_sieve_eratosthenes(num: int) -> list[int]:
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p, starting at p*p, as composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
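# Quick self-check for the sieve above:
print(prime_sieve_eratosthenes(10))  # [2, 3, 5, 7]
print(prime_sieve_eratosthenes(2))   # [2]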
'''simple docstring'''
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")  # ANSI escape: hide cursor
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")  # ANSI escape: show cursor
        sys.stdout.flush()


@contextmanager
def hidden_cursor():
    # Name chosen for readability; the original context manager was unnamed
    # consistently in this dump. It delegates to the two helpers above.
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
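# Usage sketch: wrap any console animation so the cursor is restored even if
# the body raises.
import time

with hidden_cursor():
    time.sleep(1)  # e.g. draw a spinner or progress bar here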
'''simple docstring''' from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def lowerCamelCase ( UpperCAmelCase__ : Any ) -> int: if isinstance(UpperCAmelCase__ , collections.abc.Iterable ): return x return (x, x) @require_tf class __magic_name__ : def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Union[str, Any] ): pass def SCREAMING_SNAKE_CASE_ ( self : Dict ): pass def SCREAMING_SNAKE_CASE_ ( self : int ): pass def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : List[Any]=None , **lowercase_ : Any ): lowercase_ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : Dict = TFVisionTextDualEncoderModel(lowercase_ ) lowercase_ : int = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : List[Any]=None , **lowercase_ : Tuple ): lowercase_ , lowercase_ : Optional[int] = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=lowercase_ , text_model=lowercase_ ) lowercase_ : Union[str, Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : List[str]=None , **lowercase_ : List[str] ): lowercase_ , lowercase_ : str = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : List[str] = {"""vision_model""": vision_model, """text_model""": text_model} lowercase_ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : Tuple = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Any , lowercase_ : 
List[str] , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : str=None , **lowercase_ : Any ): lowercase_ , lowercase_ : Tuple = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=lowercase_ , text_model=lowercase_ ) lowercase_ : int = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) lowercase_ : str = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase_ ) lowercase_ : List[Any] = TFVisionTextDualEncoderModel.from_pretrained(lowercase_ ) lowercase_ : Dict = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) lowercase_ : Optional[int] = after_output[0].numpy() lowercase_ : int = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-5 ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Any=None , **lowercase_ : Union[str, Any] ): lowercase_ , lowercase_ : List[str] = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Any = TFVisionTextDualEncoderModel(vision_model=lowercase_ , text_model=lowercase_ ) lowercase_ : Optional[int] = model( input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ ) lowercase_ : List[Any] = output.vision_model_output.attentions self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowercase_ : int = to_atuple(vision_model.config.image_size ) lowercase_ : int = to_atuple(vision_model.config.patch_size ) lowercase_ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowercase_ : int = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowercase_ : Optional[int] = output.text_model_output.attentions self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float ): lowercase_ : str = np.abs((a - b) ).max() self.assertLessEqual(lowercase_ , lowercase_ , f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : str = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Dict = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Dict = self.prepare_config_and_inputs() self.check_save_load(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : List[Any] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ , lowercase_ : Tuple = self.get_pretrained_model_and_inputs() lowercase_ : List[str] = model_a(**lowercase_ ) lowercase_ : Union[str, Any] = outputs[0].numpy() with tempfile.TemporaryDirectory() as 
tmp_dirname: model_a.save_pretrained(lowercase_ ) lowercase_ : str = TFVisionTextDualEncoderModel.from_pretrained(lowercase_ ) lowercase_ : int = model_a(**lowercase_ ) lowercase_ : str = after_outputs[0].numpy() lowercase_ : Dict = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-5 ) @require_tf class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" ) lowercase_ : Optional[int] = 13 lowercase_ : Tuple = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) lowercase_ : List[str] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) lowercase_ : Optional[Any] = random_attention_mask([batch_size, 4] ) lowercase_ : Any = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any , lowercase_ : Union[str, Any] ): lowercase_ : Any = TFViTModel(lowercase_ , name="""vision_model""" ) lowercase_ : Optional[Any] = TFBertModel(lowercase_ , name="""text_model""" ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Optional[int] = TFViTModelTester(self ) lowercase_ : Tuple = TFBertModelTester(self ) lowercase_ : Optional[int] = vit_model_tester.prepare_config_and_inputs() lowercase_ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ : Tuple = vision_config_and_inputs ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Any = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : str ): # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. 
lowercase_ : str = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" ) lowercase_ : Optional[int] = 13 lowercase_ : List[Any] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) lowercase_ : Tuple = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) lowercase_ : Optional[Any] = random_attention_mask([batch_size, 4] ) lowercase_ : str = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=None , **lowercase_ : Union[str, Any] ): lowercase_ , lowercase_ : Any = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Dict = TFVisionTextDualEncoderModel(vision_model=lowercase_ , text_model=lowercase_ ) lowercase_ : int = model( input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ ) lowercase_ : str = output.vision_model_output.attentions self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) lowercase_ : Dict = to_atuple(vision_model.config.image_size ) lowercase_ : Tuple = to_atuple(vision_model.config.patch_size ) lowercase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowercase_ : List[str] = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowercase_ : Dict = output.text_model_output.attentions self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[str] , lowercase_ : Dict ): lowercase_ : Dict = TFDeiTModel(lowercase_ , name="""vision_model""" ) lowercase_ : Tuple = TFRobertaModel(lowercase_ , name="""text_model""" ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Optional[Any] = TFDeiTModelTester(self ) lowercase_ : int = TFRobertaModelTester(self ) lowercase_ : Optional[int] = vit_model_tester.prepare_config_and_inputs() lowercase_ : int = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ : int = vision_config_and_inputs ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Tuple = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" ) lowercase_ : List[Any] = 13 lowercase_ : Optional[int] = floats_tensor( [ batch_size, 
model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) lowercase_ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) lowercase_ : Any = random_attention_mask([batch_size, 4] ) lowercase_ : Optional[int] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : str , lowercase_ : List[Any] ): lowercase_ : Dict = TFCLIPVisionModel(lowercase_ , name="""vision_model""" ) lowercase_ : Dict = TFBertModel(lowercase_ , name="""text_model""" ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Optional[int] = TFCLIPVisionModelTester(self ) lowercase_ : Union[str, Any] = TFBertModelTester(self ) lowercase_ : int = clip_model_tester.prepare_config_and_inputs() lowercase_ : Dict = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ : int = vision_config_and_inputs ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Tuple = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Tuple = TFVisionTextDualEncoderModel.from_pretrained( """clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=lowercase_ ) lowercase_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) lowercase_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) lowercase_ : Any = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=lowercase_ , padding=lowercase_ , return_tensors="""np""" ) lowercase_ : Dict = model(**lowercase_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) lowercase_ : List[Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , lowercase_ , atol=1E-3 ) )
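# The slow integration test above, reduced to a plain inference sketch (the
# checkpoint, processor, and fixture path are the ones the test itself loads):
#
#     model = TFVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", from_pt=True)
#     processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
#     image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#     inputs = processor(text=["una foto di un gatto", "una foto di un cane"],
#                        images=image, padding=True, return_tensors="np")
#     outputs = model(**inputs)
#     outputs.logits_per_image  # shape (num_images, num_texts); higher = better match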
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm _lowercase : int = logging.get_logger(__name__) @dataclass class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self : Optional[Any] , **lowercase_ : int ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowercase_ : Optional[int] = deprecated_arg[3:] setattr(self , lowercase_ , not kwargs.pop(lowercase_ ) ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) lowercase_ : Tuple = kwargs.pop("""torchscript""" , self.torchscript ) lowercase_ : List[Any] = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics ) lowercase_ : List[Any] = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level ) super().__init__(**lowercase_ ) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Trace the models using torchscript'''}) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''}) UpperCamelCase__ = field( default='''O1''', metadata={ '''help''': ( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. ''' '''See details at https://nvidia.github.io/apex/amp.html''' ) }, ) @cached_property def SCREAMING_SNAKE_CASE_ ( self : Tuple ): requires_backends(self , ["""torch"""] ) logger.info("""PyTorch: setting up devices""" ) if not self.cuda: lowercase_ : Optional[Any] = torch.device("""cpu""" ) lowercase_ : Tuple = 0 elif is_torch_tpu_available(): lowercase_ : Optional[int] = xm.xla_device() lowercase_ : str = 0 else: lowercase_ : int = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) lowercase_ : str = torch.cuda.device_count() return device, n_gpu @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return is_torch_tpu_available() and self.tpu @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): requires_backends(self , ["""torch"""] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): requires_backends(self , ["""torch"""] ) return self._setup_devices[0] @property def SCREAMING_SNAKE_CASE_ ( self : int ): requires_backends(self , ["""torch"""] ) return self._setup_devices[1] @property def SCREAMING_SNAKE_CASE_ ( self : int ): return self.n_gpu > 0
'''simple docstring'''
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations (a software ripple-carry adder)."""
    while second != 0:
        carry = first & second  # bits that overflow at this position
        first ^= second  # sum without the carry
        second = carry << 1  # propagate the carry one position to the left
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
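# A short worked trace of the carry loop above (sketch): add(5, 3) iterates
#   carry = 5 & 3 = 1, first = 5 ^ 3 = 6, second = 2
#   carry = 6 & 2 = 2, first = 6 ^ 2 = 4, second = 4
#   carry = 4 & 4 = 4, first = 4 ^ 4 = 0, second = 8
#   carry = 0 & 8 = 0, first = 0 ^ 8 = 8, second = 0  -> returns 8
# Caveat: with Python's unbounded ints, mixed-sign inputs such as add(-7, 7)
# keep shifting the carry left forever and never terminate.
assert add(5, 3) == 8
assert add(13, 29) == 42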
'''simple docstring'''
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression with integer operands."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division that truncates toward zero (like C), not floor
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
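# Usage sketch: "(2 + 3) * 4" written in postfix form, plus the division tweak
# above, which turns Python's floor division into C-style truncation toward zero.
assert evaluate_postfix(["2", "3", "+", "4", "*"]) == 20
assert evaluate_postfix(["-7", "2", "/"]) == -3  # plain floor division would give -4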
'''simple docstring''' import os import numpy import onnx def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str ) -> Tuple: lowercase_ : Tuple = a.name lowercase_ : Tuple = b.name lowercase_ : Any = """""" lowercase_ : List[Any] = """""" lowercase_ : List[Any] = a == b lowercase_ : Union[str, Any] = name_a lowercase_ : Optional[Any] = name_b return res def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]: for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCAmelCase__ , UpperCAmelCase__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase__ , UpperCAmelCase__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> int: for n in graph_proto.node: _node_replace_input_with(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict ) -> List[str]: lowercase_ : int = list(model.graph.initializer ) lowercase_ : List[str] = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i lowercase_ : Optional[Any] = inits[i].name lowercase_ : List[str] = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : int ) -> List[str]: lowercase_ : Dict = os.path.dirname(UpperCAmelCase__ ) lowercase_ : Optional[Any] = os.path.basename(UpperCAmelCase__ ) lowercase_ : str = onnx.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) ) lowercase_ : List[Any] = list(model.graph.initializer ) lowercase_ : int = set() lowercase_ : int = {} lowercase_ : str = [] lowercase_ : int = 0 for i in range(len(UpperCAmelCase__ ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCAmelCase__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCAmelCase__ ) dup_set.add(UpperCAmelCase__ ) lowercase_ : Dict = inits[j].data_type lowercase_ : List[str] = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("""unexpected data type: """ , UpperCAmelCase__ ) total_reduced_size += mem_size lowercase_ : int = inits[i].name lowercase_ : List[str] = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCAmelCase__ ) else: lowercase_ : Optional[int] = [name_j] ind_to_replace.append((j, i) ) print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" ) lowercase_ : Tuple = sorted(UpperCAmelCase__ ) _remove_dup_initializers_from_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : Union[str, Any] = """optimized_""" + model_file_name lowercase_ : Optional[int] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) onnx.save(UpperCAmelCase__ , UpperCAmelCase__ ) return new_model
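# Usage sketch for the script above. Upstream the final path-taking function
# appears as `remove_dup_initializers(onnx_model_path)`: it loads the model,
# finds byte-identical weight initializers, rewires every node (including
# If/Loop subgraphs) to the surviving copy, prints the savings, and writes
# "optimized_<name>.onnx" next to the original, returning the new path. As
# reproduced here every def was flattened to `lowerCamelCase` while the call
# sites keep the original underscore-prefixed helper names, so the upstream
# names are the ones to trust:
#
#     optimized_path = remove_dup_initializers("/path/to/model.onnx")  # placeholder path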
'''simple docstring''' from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _lowercase : List[Any] = logging.get_logger(__name__) def lowerCamelCase ( UpperCAmelCase__ : Union[tf.Tensor, np.ndarray] ) -> List[int]: if isinstance(UpperCAmelCase__ , np.ndarray ): return list(tensor.shape ) lowercase_ : Tuple = tf.shape(UpperCAmelCase__ ) if tensor.shape == tf.TensorShape(UpperCAmelCase__ ): return dynamic lowercase_ : Dict = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase__ )] def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[str] = None ) -> tf.Tensor: return tf.nn.softmax(logits=logits + 1e-9 , axis=UpperCAmelCase__ , name=UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=1e-5 , UpperCAmelCase__ : List[str]=-1 ) -> List[str]: # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" ) # Get mean and variance on the axis to be normalized lowercase_ , lowercase_ : List[str] = tf.nn.moments(UpperCAmelCase__ , axes=[axis] , keepdims=UpperCAmelCase__ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowercase_ : List[Any] = [1] * inputs.shape.rank lowercase_ : List[str] = shape_list(UpperCAmelCase__ )[axis] lowercase_ : List[str] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : List[Any] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) # Compute layer normalization using the batch_normalization # function. lowercase_ : str = tf.nn.batch_normalization( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , offset=UpperCAmelCase__ , scale=UpperCAmelCase__ , variance_epsilon=UpperCAmelCase__ , ) return outputs def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Any=-1 ) -> Dict: # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowercase_ : List[Any] = tf.shape(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowercase_ : Dict = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor ) -> tf.Tensor: if not isinstance(UpperCAmelCase__ , tf.Tensor ): lowercase_ : List[Any] = tf.convert_to_tensor(UpperCAmelCase__ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowercase_ : Any = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowercase_ : List[Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowercase_ : Optional[Any] = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : int , UpperCAmelCase__ : str = "input_ids" ) -> None: tf.debugging.assert_less( UpperCAmelCase__ , tf.cast(UpperCAmelCase__ , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase__ )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Any: lowercase_ : int = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowercase_ : Optional[Any] = [x for x in data if len(UpperCAmelCase__ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( """The following attributes cannot be saved to HDF5 file because """ F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) lowercase_ : Any = np.asarray(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = chunk_data else: lowercase_ : Any = data def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ) -> str: if name in group.attrs: lowercase_ : Optional[Any] = [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs[name]] else: lowercase_ : int = [] lowercase_ : Optional[int] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] ) chunk_id += 1 return data def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> Any: def _expand_single_ad_tensor(UpperCAmelCase__ : Optional[Any] ): if isinstance(UpperCAmelCase__ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(UpperCAmelCase__ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase__ )
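# Quick sketch of the shape helpers above. The internal call sites still use
# the upstream names (e.g. `shape_list` inside the layer-norm helper), although
# the definitions here were all renamed `lowerCamelCase`; the upstream
# signature of the flatten helper is flatten(input, start_dim=0, end_dim=-1):
#
#     x = tf.zeros((2, 3, 4))
#     shape_list(x)                  # [2, 3, 4] -- static dims where known, dynamic otherwise
#     flatten(x, start_dim=1).shape  # (2, 12), mirroring torch.flatten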
'''simple docstring'''
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return a word's canonical signature: its letters, sorted."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the word list sharing my_word's signature."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
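# The signature bucketing above in action (sketch; the exact anagram sets
# depend on the contents of words.txt):
assert signature("listen") == signature("silent") == "eilnst"
# anagram("listen") then returns every bucketed word with that signature,
# e.g. something like ["enlist", "listen", "silent", ...].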
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    """Möbius function: 1 for square-free n with an even number of prime factors,
    -1 for an odd number, and 0 when n is divisible by a squared prime."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
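# Worked values for the Möbius function above (sketch):
assert mobius(6) == 1  # 6 = 2 * 3: square-free, even number of prime factors
assert mobius(30) == -1  # 30 = 2 * 3 * 5: square-free, odd count
assert mobius(12) == 0  # 12 = 2^2 * 3: divisible by a square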
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = CycleDiffusionPipeline UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''negative_prompt''', '''height''', '''width''', '''negative_prompt_embeds''', } UpperCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''} UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''}) UpperCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS UpperCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS def SCREAMING_SNAKE_CASE_ ( self : int ): torch.manual_seed(0 ) lowercase_ : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) lowercase_ : Any = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , ) torch.manual_seed(0 ) lowercase_ : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase_ : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowercase_ : int = CLIPTextModel(lowercase_ ) lowercase_ : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowercase_ : Union[str, Any] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str , lowercase_ : int=0 ): lowercase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) lowercase_ : int = image / 2 + 0.5 if str(lowercase_ ).startswith("""mps""" ): lowercase_ : Dict = torch.manual_seed(lowercase_ ) else: lowercase_ : List[str] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowercase_ : Optional[int] = { """prompt""": """An astronaut riding an elephant""", """source_prompt""": """An astronaut riding a horse""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """eta""": 0.1, """strength""": 0.8, """guidance_scale""": 3, """source_guidance_scale""": 1, """output_type""": """numpy""", } return inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Tuple = """cpu""" # ensure 
determinism for the device-dependent torch.Generator lowercase_ : Any = self.get_dummy_components() lowercase_ : List[Any] = CycleDiffusionPipeline(**lowercase_ ) lowercase_ : Any = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Dict = self.get_dummy_inputs(lowercase_ ) lowercase_ : Any = pipe(**lowercase_ ) lowercase_ : Dict = output.images lowercase_ : Dict = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) lowercase_ : List[Any] = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : int = self.get_dummy_components() for name, module in components.items(): if hasattr(lowercase_ , """half""" ): lowercase_ : List[str] = module.half() lowercase_ : str = CycleDiffusionPipeline(**lowercase_ ) lowercase_ : Optional[Any] = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : str = self.get_dummy_inputs(lowercase_ ) lowercase_ : List[Any] = pipe(**lowercase_ ) lowercase_ : int = output.images lowercase_ : Union[str, Any] = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) lowercase_ : List[Any] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def SCREAMING_SNAKE_CASE_ ( self : List[str] ): return super().test_save_load_local() @unittest.skip("""non-deterministic pipeline""" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): return super().test_inference_batch_single_identical() @skip_mps def SCREAMING_SNAKE_CASE_ ( self : Dict ): return super().test_dict_tuple_outputs_equivalent() @skip_mps def SCREAMING_SNAKE_CASE_ ( self : str ): return super().test_save_load_optional_components() @skip_mps def SCREAMING_SNAKE_CASE_ ( self : str ): return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) lowercase_ : List[str] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" ) lowercase_ : int = init_image.resize((512, 512) ) lowercase_ : Optional[int] = """CompVis/stable-diffusion-v1-4""" lowercase_ : Any = DDIMScheduler.from_pretrained(lowercase_ , subfolder="""scheduler""" ) lowercase_ : List[Any] = CycleDiffusionPipeline.from_pretrained( lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , torch_dtype=torch.floataa , revision="""fp16""" ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() lowercase_ : Tuple = """A black colored car""" lowercase_ : Optional[Any] = """A blue colored car""" lowercase_ : List[Any] = torch.manual_seed(0 ) lowercase_ : Union[str, Any] = pipe( prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type="""np""" , ) 
lowercase_ : List[str] = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Any = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) lowercase_ : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" ) lowercase_ : str = init_image.resize((512, 512) ) lowercase_ : Any = """CompVis/stable-diffusion-v1-4""" lowercase_ : int = DDIMScheduler.from_pretrained(lowercase_ , subfolder="""scheduler""" ) lowercase_ : Any = CycleDiffusionPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() lowercase_ : List[Any] = """A black colored car""" lowercase_ : List[Any] = """A blue colored car""" lowercase_ : Any = torch.manual_seed(0 ) lowercase_ : Optional[Any] = pipe( prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type="""np""" , ) lowercase_ : List[str] = output.images assert np.abs(image - expected_image ).max() < 2E-2
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    """Count n < limit for which x^2 - y^2 - z^2 = n has exactly ten solutions
    with x, y, z a decreasing arithmetic progression of positive integers."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # z > 0 needs a > d; n > 0 needs a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
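# Derivation behind the loops above (sketch). Writing the progression as
# x = y + d, y, z = y - d with y = first_term, the target expands to
#     x^2 - y^2 - z^2 = (y + d)^2 - y^2 - (y - d)^2 = y * (4d - y) = n.
# So n must be a multiple of y (hence the inner range), and 4d = y + n / y,
# which is why `common_difference` has to be divisible by 4; z > 0 and
# 4d - y > 0 give the guard y > d and y < 4d. frequency[n] then counts the
# distinct (y, d) pairs, and the answer is how many n < limit have exactly ten.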
'''simple docstring''' import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput _lowercase : int = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCamelCase ( UpperCAmelCase__ : Union[List, PIL.Image.Image, torch.Tensor] ) -> List[Any]: warnings.warn( """The preprocess method is deprecated and will be removed in a future version. Please""" """ use VaeImageProcessor.preprocess instead""" , UpperCAmelCase__ , ) if isinstance(UpperCAmelCase__ , torch.Tensor ): return image elif isinstance(UpperCAmelCase__ , PIL.Image.Image ): lowercase_ : int = [image] if isinstance(image[0] , PIL.Image.Image ): lowercase_ , lowercase_ : List[Any] = image[0].size lowercase_ , lowercase_ : Dict = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 lowercase_ : List[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image] lowercase_ : List[str] = np.concatenate(UpperCAmelCase__ , axis=0 ) lowercase_ : Tuple = np.array(UpperCAmelCase__ ).astype(np.floataa ) / 255.0 lowercase_ : str = image.transpose(0 , 3 , 1 , 2 ) lowercase_ : Any = 2.0 * image - 1.0 lowercase_ : str = torch.from_numpy(UpperCAmelCase__ ) elif isinstance(image[0] , torch.Tensor ): lowercase_ : Tuple = torch.cat(UpperCAmelCase__ , dim=0 ) return image def lowerCamelCase ( UpperCAmelCase__ : Union[List, PIL.Image.Image, torch.Tensor] ) -> List[Any]: if isinstance(UpperCAmelCase__ , torch.Tensor ): return mask elif isinstance(UpperCAmelCase__ , PIL.Image.Image ): lowercase_ : Any = [mask] if isinstance(mask[0] , PIL.Image.Image ): lowercase_ , lowercase_ : List[Any] = mask[0].size lowercase_ , lowercase_ : List[str] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 lowercase_ : Any = [np.array(m.convert("""L""" ).resize((w, h) , resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask] lowercase_ : List[Any] = np.concatenate(UpperCAmelCase__ , axis=0 ) lowercase_ : Union[str, Any] = mask.astype(np.floataa ) / 255.0 lowercase_ : Tuple = 0 lowercase_ : Dict = 1 lowercase_ : str = torch.from_numpy(UpperCAmelCase__ ) elif isinstance(mask[0] , torch.Tensor ): lowercase_ : Optional[int] = torch.cat(UpperCAmelCase__ , dim=0 ) return mask class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = 42 UpperCamelCase__ = 42 def __init__( self : Union[str, Any] , lowercase_ : Any , lowercase_ : int ): super().__init__() self.register_modules(unet=lowercase_ , scheduler=lowercase_ ) @torch.no_grad() def __call__( self : Optional[int] , lowercase_ : Union[torch.Tensor, PIL.Image.Image] , lowercase_ : Union[torch.Tensor, PIL.Image.Image] , lowercase_ : int = 250 , lowercase_ : float = 0.0 , lowercase_ : int = 10 , lowercase_ : int = 10 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , ): lowercase_ : List[str] = image lowercase_ : Optional[Any] = _preprocess_image(lowercase_ ) lowercase_ : Dict = original_image.to(device=self.device , dtype=self.unet.dtype ) lowercase_ : Optional[Any] = _preprocess_mask(lowercase_ ) lowercase_ : Dict = mask_image.to(device=self.device , dtype=self.unet.dtype ) lowercase_ : Optional[int] = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(lowercase_ 
, lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowercase_ : Optional[int] = original_image.shape lowercase_ : Dict = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(lowercase_ , lowercase_ , lowercase_ , self.device ) lowercase_ : Optional[int] = eta lowercase_ : Optional[Any] = self.scheduler.timesteps[0] + 1 lowercase_ : int = generator[0] if isinstance(lowercase_ , lowercase_ ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual lowercase_ : Any = self.unet(lowercase_ , lowercase_ ).sample # compute previous image: x_t -> x_t-1 lowercase_ : List[str] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ).prev_sample else: # compute the reverse: x_t-1 -> x_t lowercase_ : str = self.scheduler.undo_step(lowercase_ , lowercase_ , lowercase_ ) lowercase_ : Dict = t lowercase_ : Any = (image / 2 + 0.5).clamp(0 , 1 ) lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowercase_ : str = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase_ )
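# Minimal usage sketch for the pipeline above. The checkpoint name and keyword
# arguments follow the upstream diffusers RePaint example; the parameter names
# in this copy were obfuscated, so treat the keywords as the upstream ones:
#
#     from diffusers import RePaintPipeline, RePaintScheduler
#     scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#     pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#     result = pipe(image=original_image, mask_image=mask_image,
#                   num_inference_steps=250, eta=0.0, jump_length=10, jump_n_sample=10)
#     result.images[0].save("inpainted.png")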
'''simple docstring''' import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class __magic_name__ ( unittest.TestCase): @parameterized.expand([(None,), ("""foo.json""",)] ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str ): lowercase_ : Union[str, Any] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ , config_name=lowercase_ ) lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_ ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , lowercase_ ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = AutoConfig.from_pretrained("""gpt2""" ) lowercase_ : List[Any] = GenerationConfig.from_model_config(lowercase_ ) lowercase_ : Optional[int] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(lowercase_ , lowercase_ ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = GenerationConfig() lowercase_ : int = { """max_new_tokens""": 1024, """foo""": """bar""", } lowercase_ : List[str] = copy.deepcopy(lowercase_ ) lowercase_ : Tuple = generation_config.update(**lowercase_ ) # update_kwargs was not modified (no side effects) self.assertEqual(lowercase_ , lowercase_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(lowercase_ , {"""foo""": """bar"""} ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Dict = GenerationConfig() lowercase_ : int = """bar""" with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir: generation_config.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , """bar""" ) lowercase_ : List[str] = GenerationConfig.from_model_config(lowercase_ ) assert not hasattr(lowercase_ , """foo""" ) # no new kwargs should be initialized if from config def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Optional[int] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , lowercase_ ) self.assertEqual(default_config.num_beams , 1 ) lowercase_ : Dict = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) 
self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , lowercase_ ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ ) lowercase_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , lowercase_ ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class __magic_name__ ( unittest.TestCase): @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any ): lowercase_ : int = TOKEN HfFolder.save_token(lowercase_ ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] ): try: delete_repo(token=cls._token , repo_id="""test-generation-config""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" ) except HTTPError: pass def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""test-generation-config""" , use_auth_token=self._token ) lowercase_ : List[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-generation-config""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="""test-generation-config""" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : int = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token ) lowercase_ : Optional[Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
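# The save/load round trip the tests above exercise, in brief (sketch):
#
#     config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
#     config.save_pretrained(tmp_dir)                              # writes generation_config.json
#     loaded = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
#     # kwargs passed to from_pretrained override the serialized values, while
#     # unspecified fields (top_k, max_length, ...) keep their defaults.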
'''simple docstring''' import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _lowercase : List[Any] = 16 _lowercase : List[str] = 32 def lowerCamelCase ( UpperCAmelCase__ : Accelerator , UpperCAmelCase__ : int = 16 ) -> Optional[Any]: lowercase_ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowercase_ : int = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(UpperCAmelCase__ : int ): # max_length=None => use the model max length (it's actually the default) lowercase_ : List[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowercase_ : Optional[int] = datasets.map( UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase_ : Any = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(UpperCAmelCase__ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. lowercase_ : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowercase_ : List[str] = 16 elif accelerator.mixed_precision != "no": lowercase_ : Union[str, Any] = 8 else: lowercase_ : List[str] = None return tokenizer.pad( UpperCAmelCase__ , padding="""longest""" , max_length=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
lowercase_ : List[Any] = DataLoader( tokenized_datasets["""train"""] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ ) lowercase_ : Tuple = DataLoader( tokenized_datasets["""validation"""] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _lowercase : Optional[int] = mocked_dataloaders # noqa: F811 def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] ) -> List[str]: # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , UpperCAmelCase__ ) == "1": lowercase_ : List[str] = 2 # Initialize accelerator lowercase_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase_ : List[Any] = config["""lr"""] lowercase_ : Optional[Any] = int(config["""num_epochs"""] ) lowercase_ : Optional[int] = int(config["""seed"""] ) lowercase_ : Any = int(config["""batch_size"""] ) lowercase_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=UpperCAmelCase__ ) def inner_training_loop(UpperCAmelCase__ : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(UpperCAmelCase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase_ : Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=UpperCAmelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowercase_ : Optional[int] = model.to(accelerator.device ) # Instantiate optimizer lowercase_ : Union[str, Any] = AdamW(params=model.parameters() , lr=UpperCAmelCase__ ) lowercase_ , lowercase_ : int = get_dataloaders(UpperCAmelCase__ , UpperCAmelCase__ ) # Instantiate scheduler lowercase_ : Optional[Any] = get_linear_schedule_with_warmup( optimizer=UpperCAmelCase__ , num_warmup_steps=100 , num_training_steps=(len(UpperCAmelCase__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[int] = accelerator.prepare( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # Now we train the model for epoch in range(UpperCAmelCase__ ): model.train() for step, batch in enumerate(UpperCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) lowercase_ : str = model(**UpperCAmelCase__ ) lowercase_ : Tuple = outputs.loss accelerator.backward(UpperCAmelCase__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(UpperCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase_ : Tuple = model(**UpperCAmelCase__ ) lowercase_ : int = outputs.logits.argmax(dim=-1 ) lowercase_ , lowercase_ : List[str] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=UpperCAmelCase__ , references=UpperCAmelCase__ , ) lowercase_ : List[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase__ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def lowerCamelCase ( ) -> int: lowercase_ : List[Any] = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) lowercase_ : int = parser.parse_args() lowercase_ : Dict = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(UpperCAmelCase__ , UpperCAmelCase__ ) if __name__ == "__main__": main()
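# The decorator used above, in isolation (sketch): on a CUDA out-of-memory
# error, `find_executable_batch_size` halves the batch size and re-invokes the
# wrapped function until training fits in memory.
#
#     from accelerate.utils import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def training_loop(batch_size):
#         ...  # build dataloaders with `batch_size`, then train
#
#     training_loop()  # called with no arguments; the decorator injects batch_size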
'''simple docstring'''
import argparse

import torch

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
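# Example invocation (sketch; the script filename and all paths are placeholders):
#
#     python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./model.ckpt \
#         --config_file ./config.json \
#         --pytorch_dump_path ./pytorch_model.bin \
#         --base_model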
'''simple docstring'''
def actual_power(a: int, b: int) -> int:
    """Compute a**abs(b) by divide and conquer; int(b / 2) truncates toward
    zero, so negative exponents also recurse down to the b == 0 base case."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Extend actual_power to negative exponents via the reciprocal."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
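# Worked example (sketch): the exponent is halved at each level, so
#     actual_power(3, 5) = 3 * actual_power(3, 2) * actual_power(3, 2) = 3 * 9 * 9 = 243
# and power(-2, -3) = 1 / actual_power(-2, -3) = 1 / -8 = -0.125.
# Note each call recomputes both halves; binding half = actual_power(a, int(b / 2))
# once per call would cut the work to O(log b) multiplications.
assert power(3, 5) == 243
assert power(-2, -3) == -0.125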
'''simple docstring''' import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _lowercase : Optional[List[str]] = None _lowercase : str = "<" if sys.byteorder == "little" else ">" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _lowercase : Optional[int] = [ np.dtype("|b1"), np.dtype("|u1"), np.dtype("<u2"), np.dtype(">u2"), np.dtype("<i2"), np.dtype(">i2"), np.dtype("<u4"), np.dtype(">u4"), np.dtype("<i4"), np.dtype(">i4"), np.dtype("<f4"), np.dtype(">f4"), np.dtype("<f8"), np.dtype(">f8"), ] @dataclass class __magic_name__ : UpperCamelCase__ = True UpperCamelCase__ = None # Automatically constructed UpperCamelCase__ = "PIL.Image.Image" UpperCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()}) UpperCamelCase__ = field(default='''Image''', init=_UpperCAmelCase, repr=_UpperCAmelCase) def __call__( self : Tuple ): return self.pa_type def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if isinstance(lowercase_ , lowercase_ ): lowercase_ : int = np.array(lowercase_ ) if isinstance(lowercase_ , lowercase_ ): return {"path": value, "bytes": None} elif isinstance(lowercase_ , lowercase_ ): return {"path": None, "bytes": value} elif isinstance(lowercase_ , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(lowercase_ ) elif isinstance(lowercase_ , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(lowercase_ ) elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("""path""" )} elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )} else: raise ValueError( f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : dict , lowercase_ : List[str]=None ): if not self.decode: raise RuntimeError("""Decoding is disabled for this feature. 
Please use Image(decode=True) instead.""" ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support decoding images, please install 'Pillow'.""" ) if token_per_repo_id is None: lowercase_ : Union[str, Any] = {} lowercase_ , lowercase_ : List[Any] = value["""path"""], value["""bytes"""] if bytes_ is None: if path is None: raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) else: if is_local_path(lowercase_ ): lowercase_ : int = PIL.Image.open(lowercase_ ) else: lowercase_ : str = path.split("""::""" )[-1] try: lowercase_ : Any = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["""repo_id"""] lowercase_ : Optional[Any] = token_per_repo_id.get(lowercase_ ) except ValueError: lowercase_ : str = None with xopen(lowercase_ , """rb""" , use_auth_token=lowercase_ ) as f: lowercase_ : Dict = BytesIO(f.read() ) lowercase_ : Optional[Any] = PIL.Image.open(bytes_ ) else: lowercase_ : Any = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def SCREAMING_SNAKE_CASE_ ( self : int ): from .features import Value return ( self if self.decode else { "bytes": Value("""binary""" ), "path": Value("""string""" ), } ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ): if pa.types.is_string(storage.type ): lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.binary() ) lowercase_ : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Any = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("""bytes""" ) >= 0: lowercase_ : Optional[int] = storage.field("""bytes""" ) else: lowercase_ : Optional[Any] = pa.array([None] * len(lowercase_ ) , type=pa.binary() ) if storage.type.get_field_index("""path""" ) >= 0: lowercase_ : Dict = storage.field("""path""" ) else: lowercase_ : int = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): lowercase_ : Optional[int] = pa.array( [encode_np_array(np.array(lowercase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) lowercase_ : Tuple = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Tuple = pa.StructArray.from_arrays( [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() ) return array_cast(lowercase_ , self.pa_type ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(lowercase_ : Optional[Any] ): with xopen(lowercase_ , """rb""" ) as f: lowercase_ : int = f.read() return bytes_ lowercase_ : Optional[Any] = pa.array( [ (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) lowercase_ : Any = pa.array( [os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , ) lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] 
, mask=bytes_array.is_null() ) return array_cast(lowercase_ , self.pa_type ) def lowerCamelCase ( ) -> List[str]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() lowercase_ : int = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> bytes: lowercase_ : Tuple = BytesIO() if image.format in list_image_compression_formats(): lowercase_ : int = image.format else: lowercase_ : int = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF""" image.save(UpperCAmelCase__ , format=UpperCAmelCase__ ) return buffer.getvalue() def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> dict: if hasattr(UpperCAmelCase__ , """filename""" ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )} def lowerCamelCase ( UpperCAmelCase__ : np.ndarray ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) lowercase_ : List[Any] = array.dtype lowercase_ : int = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER lowercase_ : Dict = dtype.kind lowercase_ : List[Any] = dtype.itemsize lowercase_ : Any = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: lowercase_ : int = np.dtype("""|u1""" ) if dtype_kind not in ["u", "i"]: raise TypeError( F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' ) if dtype is not dest_dtype: warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: lowercase_ : str = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: lowercase_ : str = dtype_byteorder + dtype_kind + str(UpperCAmelCase__ ) lowercase_ : Optional[Any] = np.dtype(UpperCAmelCase__ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( F'''Cannot convert dtype {dtype} to a valid image dtype. 
Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' ) lowercase_ : Optional[int] = PIL.Image.fromarray(array.astype(UpperCAmelCase__ ) ) return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )} def lowerCamelCase ( UpperCAmelCase__ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if objs: lowercase_ , lowercase_ : Dict = first_non_null_value(UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(UpperCAmelCase__ , np.ndarray ): lowercase_ : Union[str, Any] = no_op_if_value_is_null(UpperCAmelCase__ ) return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs] elif isinstance(UpperCAmelCase__ , PIL.Image.Image ): lowercase_ : int = no_op_if_value_is_null(UpperCAmelCase__ ) return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs] else: return objs else: return objs
21
1
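The record above is the `datasets` Image feature's decode path: each example is a {"bytes", "path"} struct, and decoding prefers the embedded bytes, falling back to opening the path (with extra hub-URL handling for streamed files). A minimal sketch of that fallback for local data only; the helper name is mine, not the library's:

```python
from io import BytesIO

import PIL.Image


def decode_image_example(value: dict) -> "PIL.Image.Image":
    """Open a PIL image from a {"bytes", "path"} struct, preferring raw bytes."""
    path, bytes_ = value.get("path"), value.get("bytes")
    if bytes_ is not None:
        image = PIL.Image.open(BytesIO(bytes_))
    elif path is not None:
        image = PIL.Image.open(path)  # assumes a local, readable file
    else:
        raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
    image.load()  # force the read now to avoid "Too many open files" errors later
    return image
```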
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) _lowercase : Optional[int] = { "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"], "processing_speech_to_text": ["Speech2TextProcessor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : int = ["Speech2TextTokenizer"] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[int] = ["Speech2TextFeatureExtractor"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Tuple = [ "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSpeech2TextForConditionalGeneration", "TFSpeech2TextModel", "TFSpeech2TextPreTrainedModel", ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : int = [ "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "Speech2TextForConditionalGeneration", "Speech2TextModel", "Speech2TextPreTrainedModel", ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys _lowercase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
21
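This chunk is the standard `transformers` lazy-init module: `_import_structure` names every public symbol up front, and `_LazyModule` defers the actual submodule imports until attribute access, so importing the package stays cheap even when the torch/TF extras are missing. The same idea can be sketched with PEP 562's module-level `__getattr__` (the package and symbol names below are made up, not the transformers implementation):

```python
# mypackage/__init__.py -- a hypothetical package using lazy attribute imports
import importlib

_import_structure = {"tokenization": ["MyTokenizer"], "modeling": ["MyModel"]}
_symbol_to_module = {sym: mod for mod, syms in _import_structure.items() for sym in syms}


def __getattr__(name: str):
    # Import the owning submodule only when one of its symbols is first requested.
    module = _symbol_to_module.get(name)
    if module is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    return getattr(importlib.import_module(f".{module}", __name__), name)
```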
'''simple docstring''' import colorsys from PIL import Image # type: ignore def lowerCamelCase ( UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : int ) -> float: lowercase_ : List[Any] = x lowercase_ : Any = y for step in range(UpperCAmelCase__ ): # noqa: B007 lowercase_ : Dict = a * a - b * b + x lowercase_ : str = 2 * a * b + y lowercase_ : Optional[Any] = a_new # divergence happens for any complex number with an absolute value # greater than 2 (the code checks the squared magnitude against 4) if a * a + b * b > 4: break return step / (max_step - 1) def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple: if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple: if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(UpperCAmelCase__ , 1 , 1 ) ) def lowerCamelCase ( UpperCAmelCase__ : int = 800 , UpperCAmelCase__ : int = 600 , UpperCAmelCase__ : float = -0.6 , UpperCAmelCase__ : float = 0 , UpperCAmelCase__ : float = 3.2 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : bool = True , ) -> Image.Image: lowercase_ : Union[str, Any] = Image.new("""RGB""" , (image_width, image_height) ) lowercase_ : Tuple = img.load() # loop through the image-coordinates for image_x in range(UpperCAmelCase__ ): for image_y in range(UpperCAmelCase__ ): # determine the figure-coordinates based on the image-coordinates lowercase_ : Any = figure_width / image_width * image_height lowercase_ : Tuple = figure_center_x + (image_x / image_width - 0.5) * figure_width lowercase_ : Union[str, Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height lowercase_ : str = get_distance(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: lowercase_ : List[Any] = get_color_coded_rgb(UpperCAmelCase__ ) else: lowercase_ : Dict = get_black_and_white_rgb(UpperCAmelCase__ ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure _lowercase : List[str] = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
21
1
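With readable names, the escape-time kernel in the chunk above maps a point (x, y) to the fraction of `max_step` iterations survived before the orbit's squared magnitude exceeds 4. A runnable restatement plus two spot checks (the function name follows the deobfuscated original, `get_distance`):

```python
def get_distance(x: float, y: float, max_step: int) -> float:
    a, b = x, y
    step = 0
    for step in range(max_step):
        # z -> z**2 + c with z = a + bi and c = x + yi
        a, b = a * a - b * b + x, 2 * a * b + y
        if a * a + b * b > 4:  # |z| > 2 guarantees divergence
            break
    return step / (max_step - 1)


print(get_distance(0.0, 0.0, 50))  # 1.0 -> the origin never escapes
print(get_distance(2.0, 1.0, 50))  # 0.0 -> escapes on the first iteration
```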
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def lowerCamelCase ( UpperCAmelCase__ : Any ) -> Tuple: lowercase_ : Any = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = StableDiffusionLatentUpscalePipeline UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''height''', '''width''', '''cross_attention_kwargs''', '''negative_prompt_embeds''', '''prompt_embeds''', } UpperCamelCase__ = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''} UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCamelCase__ = frozenset( []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCamelCase__ = frozenset([]) UpperCamelCase__ = True @property def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : List[str] = 1 lowercase_ : Union[str, Any] = 4 lowercase_ : Optional[Any] = (16, 16) lowercase_ : Tuple = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase_ ) return image def SCREAMING_SNAKE_CASE_ ( self : Tuple ): torch.manual_seed(0 ) lowercase_ : int = UNetaDConditionModel( act_fn="""gelu""" , attention_head_dim=8 , norm_num_groups=lowercase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=( """KDownBlock2D""", """KCrossAttnDownBlock2D""", """KCrossAttnDownBlock2D""", """KCrossAttnDownBlock2D""", ) , in_channels=8 , mid_block_type=lowercase_ , only_cross_attention=lowercase_ , out_channels=5 , resnet_time_scale_shift="""scale_shift""" , time_embedding_type="""fourier""" , timestep_post_act="""gelu""" , up_block_types=("""KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KUpBlock2D""") , ) lowercase_ : List[str] = AutoencoderKL( block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[ """DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D""", ] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) lowercase_ : int = EulerDiscreteScheduler(prediction_type="""sample""" ) lowercase_ : List[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""quick_gelu""" , projection_dim=512 , ) lowercase_ : int = CLIPTextModel(lowercase_ ) lowercase_ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowercase_ : 
List[str] = { """unet""": model.eval(), """vae""": vae.eval(), """scheduler""": scheduler, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : Optional[Any]=0 ): if str(lowercase_ ).startswith("""mps""" ): lowercase_ : Optional[int] = torch.manual_seed(lowercase_ ) else: lowercase_ : str = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowercase_ : str = { """prompt""": """A painting of a squirrel eating a burger""", """image""": self.dummy_image.cpu(), """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Optional[Any] = """cpu""" lowercase_ : Any = self.get_dummy_components() lowercase_ : str = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Optional[int] = self.get_dummy_inputs(lowercase_ ) lowercase_ : List[Any] = pipe(**lowercase_ ).images lowercase_ : Optional[Any] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 256, 256, 3) ) lowercase_ : Any = np.array( [0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] ) lowercase_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase_ , 1E-3 ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): super().test_inference_batch_single_identical(expected_max_diff=7E-3 ) def SCREAMING_SNAKE_CASE_ ( self : str ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 ) def SCREAMING_SNAKE_CASE_ ( self : str ): super().test_save_load_local(expected_max_difference=3E-3 ) def SCREAMING_SNAKE_CASE_ ( self : int ): super().test_save_load_optional_components(expected_max_difference=3E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : int = [ """DDIMScheduler""", """DDPMScheduler""", """PNDMScheduler""", """HeunDiscreteScheduler""", """EulerAncestralDiscreteScheduler""", """KDPM2DiscreteScheduler""", """KDPM2AncestralDiscreteScheduler""", """DPMSolverSDEScheduler""", ] lowercase_ : Union[str, Any] = self.get_dummy_components() lowercase_ : Dict = self.pipeline_class(**lowercase_ ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Dict = self.get_dummy_inputs(lowercase_ ) lowercase_ : Optional[int] = 2 lowercase_ : int = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue lowercase_ : Union[str, Any] = getattr(lowercase_ , scheduler_enum.name ) lowercase_ : List[str] = scheduler_cls.from_config(pipe.scheduler.config ) lowercase_ : Union[str, Any] = pipe(**lowercase_ )[0] outputs.append(lowercase_ ) assert check_same_shape(lowercase_ ) @require_torch_gpu @slow class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : int ): super().tearDown() gc.collect() torch.cuda.empty_cache() def 
SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : int = torch.manual_seed(33 ) lowercase_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" , torch_dtype=torch.floataa ) pipe.to("""cuda""" ) lowercase_ : Any = StableDiffusionLatentUpscalePipeline.from_pretrained( """stabilityai/sd-x2-latent-upscaler""" , torch_dtype=torch.floataa ) upscaler.to("""cuda""" ) lowercase_ : List[Any] = """a photo of an astronaut high resolution, unreal engine, ultra realistic""" lowercase_ : int = pipe(lowercase_ , generator=lowercase_ , output_type="""latent""" ).images lowercase_ : Optional[int] = upscaler( prompt=lowercase_ , image=lowercase_ , num_inference_steps=20 , guidance_scale=0 , generator=lowercase_ , output_type="""np""" , ).images[0] lowercase_ : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy""" ) assert np.abs((expected_image - image).mean() ) < 5E-2 def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = torch.manual_seed(33 ) lowercase_ : str = StableDiffusionLatentUpscalePipeline.from_pretrained( """stabilityai/sd-x2-latent-upscaler""" , torch_dtype=torch.floataa ) upscaler.to("""cuda""" ) lowercase_ : Union[str, Any] = """the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas""" lowercase_ : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png""" ) lowercase_ : Dict = upscaler( prompt=lowercase_ , image=lowercase_ , num_inference_steps=20 , guidance_scale=0 , generator=lowercase_ , output_type="""np""" , ).images[0] lowercase_ : Optional[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy""" ) assert np.abs((expected_image - image).max() ) < 5E-2
21
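The scheduler-sweep test at the end of the chunk above leans on the small `check_same_shape` helper defined at its top: every Karras scheduler must produce outputs of identical shape. The helper in isolation, with a quick self-test:

```python
import torch


def check_same_shape(tensor_list):
    """True when every tensor in the list shares the first tensor's shape."""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])


assert check_same_shape([torch.zeros(1, 3), torch.ones(1, 3)])
assert not check_same_shape([torch.zeros(1, 3), torch.ones(2, 3)])
```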
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = DistilBertTokenizer UpperCamelCase__ = DistilBertTokenizerFast UpperCamelCase__ = True @slow def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : int = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" ) lowercase_ : str = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase_ ) lowercase_ : Optional[int] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase_ ) lowercase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowercase_ ) lowercase_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
21
1
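The tokenizer test encodes two sequences without special tokens and asserts the BERT-style layouts `[CLS] A [SEP]` and `[CLS] A [SEP] B [SEP]`. The same assertions written out directly (this downloads the distilbert checkpoint):

```python
from transformers import DistilBertTokenizer

tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
a = tokenizer.encode("sequence builders", add_special_tokens=False)
b = tokenizer.encode("multi-sequence build", add_special_tokens=False)

# single sequence: [CLS] A [SEP]
assert tokenizer.build_inputs_with_special_tokens(a) == [tokenizer.cls_token_id, *a, tokenizer.sep_token_id]
# sequence pair: [CLS] A [SEP] B [SEP]
assert tokenizer.build_inputs_with_special_tokens(a, b) == [
    tokenizer.cls_token_id, *a, tokenizer.sep_token_id, *b, tokenizer.sep_token_id
]
```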
'''simple docstring''' from ...processing_utils import ProcessorMixin class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''WhisperFeatureExtractor''' UpperCamelCase__ = '''WhisperTokenizer''' def __init__( self : Any , lowercase_ : Optional[Any] , lowercase_ : Dict ): super().__init__(lowercase_ , lowercase_ ) lowercase_ : Any = self.feature_extractor lowercase_ : Tuple = False def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str=None , lowercase_ : Any=None , lowercase_ : Any=True ): return self.tokenizer.get_decoder_prompt_ids(task=lowercase_ , language=lowercase_ , no_timestamps=lowercase_ ) def __call__( self : Union[str, Any] , *lowercase_ : str , **lowercase_ : Dict ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*lowercase_ , **lowercase_ ) lowercase_ : str = kwargs.pop("""audio""" , lowercase_ ) lowercase_ : Tuple = kwargs.pop("""sampling_rate""" , lowercase_ ) lowercase_ : Dict = kwargs.pop("""text""" , lowercase_ ) if len(lowercase_ ) > 0: lowercase_ : Tuple = args[0] lowercase_ : Any = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""" ) if audio is not None: lowercase_ : List[Any] = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_ ) if text is not None: lowercase_ : Any = self.tokenizer(lowercase_ , **lowercase_ ) if text is None: return inputs elif audio is None: return encodings else: lowercase_ : List[Any] = encodings["""input_ids"""] return inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , *lowercase_ : List[str] , **lowercase_ : List[Any] ): return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : List[Any] ): return self.tokenizer.decode(*lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : str , lowercase_ : List[Any]="np" ): return self.tokenizer.get_prompt_ids(lowercase_ , return_tensors=lowercase_ )
21
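The processor above multiplexes one `__call__`: `audio=` goes to the feature extractor, `text=` to the tokenizer, and when both are present the token ids are attached to the audio features as labels. A hedged usage sketch (requires the `openai/whisper-tiny` checkpoint; the silence input is just a stand-in):

```python
import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz

inputs = processor(audio=audio, sampling_rate=16_000, text="hello world")
print(sorted(inputs.keys()))  # expect input_features from the extractor plus labels from the tokenizer
```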
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available _lowercase : Union[str, Any] = {"tokenization_herbert": ["HerbertTokenizer"]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : str = ["HerbertTokenizerFast"] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys _lowercase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
21
1
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class __magic_name__ ( _UpperCAmelCase): def __init__( self : Tuple , lowercase_ : Optional[int] , lowercase_ : Any=13 , lowercase_ : Optional[int]=7 , lowercase_ : List[Any]=True , lowercase_ : List[str]=True , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Any=99 , lowercase_ : List[str]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : Tuple=4 , lowercase_ : int=37 , lowercase_ : Any="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=512 , lowercase_ : Optional[Any]=16 , lowercase_ : Optional[int]=2 , lowercase_ : Any=0.02 , lowercase_ : str=False , lowercase_ : List[Any]=True , lowercase_ : Dict="None" , lowercase_ : Tuple=3 , lowercase_ : Any=4 , lowercase_ : Tuple=None , ): lowercase_ : int = parent lowercase_ : List[Any] = batch_size lowercase_ : Optional[Any] = seq_length lowercase_ : List[str] = is_training lowercase_ : List[Any] = use_input_mask lowercase_ : Optional[int] = use_token_type_ids lowercase_ : Union[str, Any] = use_labels lowercase_ : Dict = vocab_size lowercase_ : List[str] = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Optional[Any] = intermediate_size lowercase_ : Optional[int] = hidden_act lowercase_ : Any = hidden_dropout_prob lowercase_ : Any = attention_probs_dropout_prob lowercase_ : Any = max_position_embeddings lowercase_ : Dict = type_vocab_size lowercase_ : List[str] = type_sequence_label_size lowercase_ : List[Any] = initializer_range lowercase_ : Union[str, Any] = num_labels lowercase_ : Any = num_choices lowercase_ : Optional[int] = relative_attention lowercase_ : Tuple = position_biased_input lowercase_ : List[str] = pos_att_type lowercase_ : Any = scope def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase_ : Optional[Any] = None if self.use_input_mask: lowercase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) lowercase_ : List[str] = None if self.use_token_type_ids: lowercase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase_ : str = None lowercase_ : List[str] = None lowercase_ : List[Any] = None if self.use_labels: lowercase_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase_ : str = ids_tensor([self.batch_size] , self.num_choices ) lowercase_ : List[str] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE_ ( self : str ): return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : List[str] = self.get_config() lowercase_ : int = 300 return config def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Dict ): self.parent.assertListEqual(list(result.loss.size() ) , [] ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : Tuple ): lowercase_ : str = DebertaModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : str = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ )[0] lowercase_ : Any = model(lowercase_ , token_type_ids=lowercase_ )[0] lowercase_ : Any = model(lowercase_ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : str ): lowercase_ : Any = DebertaForMaskedLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Tuple = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : int ): lowercase_ : List[Any] = self.num_labels lowercase_ : Optional[Any] = DebertaForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Optional[Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : str ): lowercase_ : List[Any] = self.num_labels lowercase_ : List[Any] = DebertaForTokenClassification(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Union[str, Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Dict ): lowercase_ : Any = DebertaForQuestionAnswering(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : int = model( lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Optional[Any] = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : List[Any] = config_and_inputs lowercase_ : str = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) UpperCamelCase__ = ( { '''feature-extraction''': DebertaModel, '''fill-mask''': DebertaForMaskedLM, '''question-answering''': DebertaForQuestionAnswering, '''text-classification''': DebertaForSequenceClassification, '''token-classification''': DebertaForTokenClassification, '''zero-shot''': DebertaForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase__ = True UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Optional[int] = DebertaModelTester(self ) lowercase_ : Any = ConfigTester(self , config_class=lowercase_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : List[str] ): for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : Any = DebertaModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) @require_torch @require_sentencepiece @require_tokenizers class __magic_name__ ( unittest.TestCase): @unittest.skip(reason="""Model not available yet""" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): pass @slow def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Optional[int] = DebertaModel.from_pretrained("""microsoft/deberta-base""" ) lowercase_ : Union[str, Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) lowercase_ : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase_ : int = model(lowercase_ , attention_mask=lowercase_ )[0] # compare the actual values for a slice. 
lowercase_ : str = torch.tensor( [[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase_ , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
21
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _lowercase : Union[str, Any] = { "configuration_encodec": [ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", "EncodecConfig", ], "feature_extraction_encodec": ["EncodecFeatureExtractor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] = [ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
21
1
'''simple docstring''' import colorsys from PIL import Image # type: ignore def lowerCamelCase ( UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : int ) -> float: lowercase_ : List[Any] = x lowercase_ : Any = y for step in range(UpperCAmelCase__ ): # noqa: B007 lowercase_ : Dict = a * a - b * b + x lowercase_ : str = 2 * a * b + y lowercase_ : Optional[Any] = a_new # divergence happens for any complex number with an absolute value # greater than 2 (the code checks the squared magnitude against 4) if a * a + b * b > 4: break return step / (max_step - 1) def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple: if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple: if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(UpperCAmelCase__ , 1 , 1 ) ) def lowerCamelCase ( UpperCAmelCase__ : int = 800 , UpperCAmelCase__ : int = 600 , UpperCAmelCase__ : float = -0.6 , UpperCAmelCase__ : float = 0 , UpperCAmelCase__ : float = 3.2 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : bool = True , ) -> Image.Image: lowercase_ : Union[str, Any] = Image.new("""RGB""" , (image_width, image_height) ) lowercase_ : Tuple = img.load() # loop through the image-coordinates for image_x in range(UpperCAmelCase__ ): for image_y in range(UpperCAmelCase__ ): # determine the figure-coordinates based on the image-coordinates lowercase_ : Any = figure_width / image_width * image_height lowercase_ : Tuple = figure_center_x + (image_x / image_width - 0.5) * figure_width lowercase_ : Union[str, Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height lowercase_ : str = get_distance(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: lowercase_ : List[Any] = get_color_coded_rgb(UpperCAmelCase__ ) else: lowercase_ : Dict = get_black_and_white_rgb(UpperCAmelCase__ ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure _lowercase : List[str] = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
21
'''simple docstring''' import os import numpy import onnx def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str ) -> Tuple: lowercase_ : Tuple = a.name lowercase_ : Tuple = b.name lowercase_ : Any = """""" lowercase_ : List[Any] = """""" lowercase_ : List[Any] = a == b lowercase_ : Union[str, Any] = name_a lowercase_ : Optional[Any] = name_b return res def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]: for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCAmelCase__ , UpperCAmelCase__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase__ , UpperCAmelCase__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> int: for n in graph_proto.node: _node_replace_input_with(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict ) -> List[str]: lowercase_ : int = list(model.graph.initializer ) lowercase_ : List[str] = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i lowercase_ : Optional[Any] = inits[i].name lowercase_ : List[str] = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : int ) -> List[str]: lowercase_ : Dict = os.path.dirname(UpperCAmelCase__ ) lowercase_ : Optional[Any] = os.path.basename(UpperCAmelCase__ ) lowercase_ : str = onnx.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) ) lowercase_ : List[Any] = list(model.graph.initializer ) lowercase_ : int = set() lowercase_ : int = {} lowercase_ : str = [] lowercase_ : int = 0 for i in range(len(UpperCAmelCase__ ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCAmelCase__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCAmelCase__ ) dup_set.add(UpperCAmelCase__ ) lowercase_ : Dict = inits[j].data_type lowercase_ : List[str] = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("""unexpected data type: """ , UpperCAmelCase__ ) total_reduced_size += mem_size lowercase_ : int = inits[i].name lowercase_ : List[str] = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCAmelCase__ ) else: lowercase_ : Optional[int] = [name_j] ind_to_replace.append((j, i) ) print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" ) lowercase_ : Tuple = sorted(UpperCAmelCase__ ) _remove_dup_initializers_from_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : Union[str, Any] = """optimized_""" + model_file_name lowercase_ : Optional[int] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) onnx.save(UpperCAmelCase__ , UpperCAmelCase__ ) return new_model
21
1
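The deduplication script above compares initializers with their names temporarily blanked (two weight tensors can be byte-identical yet differently named), then rewires node inputs from each duplicate to the tensor that is kept. The equality trick in isolation; the function name is mine, mirroring the deobfuscated `_is_equal_tensor_proto`:

```python
def tensor_protos_equal(a, b) -> bool:
    """Compare two onnx TensorProtos while ignoring their names."""
    name_a, name_b = a.name, b.name
    a.name, b.name = "", ""  # blank the names so only dims/dtype/data take part
    try:
        return a == b  # protobuf messages compare field by field
    finally:
        a.name, b.name = name_a, name_b  # always restore the original names
```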
'''simple docstring''' from __future__ import annotations import requests def lowerCamelCase ( UpperCAmelCase__ : str ) -> dict: lowercase_ : Any = F'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty''' return requests.get(UpperCAmelCase__ ).json() def lowerCamelCase ( UpperCAmelCase__ : int = 10 ) -> list[dict]: lowercase_ : Optional[int] = """https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty""" lowercase_ : str = requests.get(UpperCAmelCase__ ).json()[:max_stories] return [get_hackernews_story(UpperCAmelCase__ ) for story_id in story_ids] def lowerCamelCase ( UpperCAmelCase__ : int = 10 ) -> str: lowercase_ : List[Any] = hackernews_top_stories(UpperCAmelCase__ ) return "\n".join("""* [{title}]({url})""".format(**UpperCAmelCase__ ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
21
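The helper above wraps two public Firebase endpoints: `/v0/topstories.json` returns story ids and `/v0/item/<id>.json` returns one story. The same flow at the raw-requests level (network access assumed; Ask HN items may lack a `url` field, hence the `.get`):

```python
import requests

BASE = "https://hacker-news.firebaseio.com/v0"
story_ids = requests.get(f"{BASE}/topstories.json?print=pretty", timeout=10).json()[:3]
for story_id in story_ids:
    story = requests.get(f"{BASE}/item/{story_id}.json?print=pretty", timeout=10).json()
    print(f"* [{story['title']}]({story.get('url', '')})")
```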
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING _lowercase : str = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase) class __magic_name__ ( _UpperCAmelCase): def __init__( self : str , *lowercase_ : Dict , **lowercase_ : List[Any] ): super().__init__(*lowercase_ , **lowercase_ ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : List[Any]=None , lowercase_ : Dict=None ): lowercase_ : Optional[Any] = {} lowercase_ : Tuple = {} if prompt is not None: lowercase_ : Tuple = prompt if generate_kwargs is not None: lowercase_ : List[str] = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: lowercase_ : List[Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) lowercase_ : str = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : List[Any] , lowercase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase_ : Optional[int] ): return super().__call__(lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[Any] , lowercase_ : Tuple=None ): lowercase_ : List[Any] = load_image(lowercase_ ) if prompt is not None: if not isinstance(lowercase_ , lowercase_ ): raise ValueError( f'''Received an invalid text input, got - {type(lowercase_ )} - but expected a single string. 
''' """Note also that one single text can be provided for conditional image to text generation.""" ) lowercase_ : List[Any] = self.model.config.model_type if model_type == "git": lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework ) lowercase_ : Union[str, Any] = self.tokenizer(text=lowercase_ , add_special_tokens=lowercase_ ).input_ids lowercase_ : int = [self.tokenizer.cls_token_id] + input_ids lowercase_ : List[Any] = torch.tensor(lowercase_ ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": lowercase_ : Union[str, Any] = self.image_processor(images=lowercase_ , header_text=lowercase_ , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework ) lowercase_ : List[str] = self.tokenizer(lowercase_ , return_tensors=self.framework ) model_inputs.update(lowercase_ ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: lowercase_ : List[str] = self.image_processor(images=lowercase_ , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: lowercase_ : str = None return model_inputs def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Dict , lowercase_ : Optional[Any]=None ): # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , lowercase_ ) and all(x is None for x in model_inputs["""input_ids"""] ) ): lowercase_ : Any = None if generate_kwargs is None: lowercase_ : Optional[Any] = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. lowercase_ : Dict = model_inputs.pop(self.model.main_input_name ) lowercase_ : Any = self.model.generate(lowercase_ , **lowercase_ , **lowercase_ ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[Any] ): lowercase_ : List[str] = [] for output_ids in model_outputs: lowercase_ : Union[str, Any] = { """generated_text""": self.tokenizer.decode( lowercase_ , skip_special_tokens=lowercase_ , ) } records.append(lowercase_ ) return records
21
1
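For GIT checkpoints, the preprocess branch above conditions generation by tokenizing the prompt without special tokens and manually prepending the CLS id before adding a batch dimension. That step in isolation (hedged; uses the public `microsoft/git-base` tokenizer):

```python
import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/git-base")
input_ids = tokenizer(text="a picture of", add_special_tokens=False).input_ids
input_ids = [tokenizer.cls_token_id] + input_ids  # GIT expects a leading CLS token
input_ids = torch.tensor(input_ids).unsqueeze(0)  # add the batch dimension
print(input_ids.shape)
```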
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING _lowercase : Any = logging.get_logger(__name__) _lowercase : List[Any] = { "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json", # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''deformable_detr''' UpperCamelCase__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : Any , lowercase_ : Tuple=True , lowercase_ : List[str]=None , lowercase_ : Optional[Any]=3 , lowercase_ : Any=300 , lowercase_ : Tuple=1024 , lowercase_ : Optional[int]=6 , lowercase_ : List[str]=1024 , lowercase_ : Dict=8 , lowercase_ : Dict=6 , lowercase_ : Tuple=1024 , lowercase_ : Optional[int]=8 , lowercase_ : Dict=0.0 , lowercase_ : str=True , lowercase_ : Union[str, Any]="relu" , lowercase_ : List[str]=256 , lowercase_ : List[str]=0.1 , lowercase_ : Any=0.0 , lowercase_ : Any=0.0 , lowercase_ : Optional[Any]=0.02 , lowercase_ : Tuple=1.0 , lowercase_ : Dict=True , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple="sine" , lowercase_ : Dict="resnet50" , lowercase_ : Union[str, Any]=True , lowercase_ : Union[str, Any]=False , lowercase_ : List[str]=4 , lowercase_ : List[Any]=4 , lowercase_ : Union[str, Any]=4 , lowercase_ : Union[str, Any]=False , lowercase_ : List[Any]=300 , lowercase_ : Any=False , lowercase_ : Any=1 , lowercase_ : Union[str, Any]=5 , lowercase_ : List[Any]=2 , lowercase_ : List[str]=1 , lowercase_ : List[Any]=1 , lowercase_ : Optional[int]=5 , lowercase_ : List[str]=2 , lowercase_ : Tuple=0.1 , lowercase_ : Optional[Any]=0.25 , lowercase_ : Tuple=False , **lowercase_ : Optional[int] , ): if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) lowercase_ : Dict = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(lowercase_ , lowercase_ ): lowercase_ : str = backbone_config.get("""model_type""" ) lowercase_ : List[Any] = CONFIG_MAPPING[backbone_model_type] lowercase_ : Union[str, Any] = config_class.from_dict(lowercase_ ) lowercase_ : Tuple = use_timm_backbone lowercase_ : Dict = backbone_config lowercase_ : str = num_channels lowercase_ : List[Any] = num_queries lowercase_ : str = max_position_embeddings lowercase_ : Union[str, Any] = d_model lowercase_ : List[Any] = encoder_ffn_dim lowercase_ : Dict = encoder_layers lowercase_ : Tuple = encoder_attention_heads lowercase_ : Any = decoder_ffn_dim lowercase_ : int = decoder_layers lowercase_ : Optional[Any] = decoder_attention_heads lowercase_ : List[Any] = dropout lowercase_ : Dict = attention_dropout lowercase_ : Optional[int] = activation_dropout lowercase_ : int = activation_function lowercase_ : Union[str, Any] = init_std lowercase_ : Optional[Any] = init_xavier_std lowercase_ : Union[str, Any] = encoder_layerdrop lowercase_ : Optional[int] = auxiliary_loss lowercase_ : Optional[Any] = position_embedding_type lowercase_ : str = backbone lowercase_ : Any = use_pretrained_backbone lowercase_ : Optional[int] = dilation # deformable attributes lowercase_ : List[Any] = num_feature_levels lowercase_ : Tuple = encoder_n_points lowercase_ : Optional[Any] = decoder_n_points lowercase_ : Optional[int] = two_stage lowercase_ : Union[str, Any] = two_stage_num_proposals lowercase_ : str = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError("""If two_stage is True, with_box_refine must be True.""" ) # Hungarian matcher lowercase_ : str = class_cost lowercase_ : Optional[Any] = bbox_cost lowercase_ : str = giou_cost # Loss coefficients lowercase_ : str = mask_loss_coefficient lowercase_ : Union[str, Any] = dice_loss_coefficient lowercase_ : Optional[Any] = bbox_loss_coefficient lowercase_ : Union[str, Any] = giou_loss_coefficient lowercase_ : str = eos_coefficient lowercase_ : List[Any] = focal_alpha lowercase_ : Union[str, Any] = disable_custom_kernels super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ ) @property def SCREAMING_SNAKE_CASE_ ( self : str ): return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE_ ( self : Any ): return self.d_model def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Optional[Any] = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: lowercase_ : str = self.backbone_config.to_dict() lowercase_ : Dict = self.__class__.model_type return output
21
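The config above enforces one hard invariant in `__init__`: `two_stage=True` requires `with_box_refine=True`, and `num_attention_heads` is aliased onto `encoder_attention_heads` through `attribute_map`. A hedged usage sketch:

```python
from transformers import DeformableDetrConfig

config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
print(config.num_attention_heads == config.encoder_attention_heads)  # True, via attribute_map

try:
    DeformableDetrConfig(two_stage=True, with_box_refine=False)
except ValueError as err:
    print(err)  # "If two_stage is True, with_box_refine must be True."
```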
'''simple docstring''' class __magic_name__ : def __init__( self : int , lowercase_ : list ): lowercase_ : Dict = set_counts lowercase_ : List[Any] = max(lowercase_ ) lowercase_ : str = len(lowercase_ ) lowercase_ : str = [1] * num_sets lowercase_ : Dict = list(range(lowercase_ ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ): lowercase_ : List[Any] = self.get_parent(lowercase_ ) lowercase_ : Union[str, Any] = self.get_parent(lowercase_ ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowercase_ : List[str] = 0 lowercase_ : Optional[int] = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowercase_ : int = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowercase_ : int = 0 lowercase_ : List[Any] = src_parent lowercase_ : List[Any] = self.set_counts[src_parent] lowercase_ : Tuple = max(self.max_set , lowercase_ ) return True def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : int ): if self.parents[disj_set] == disj_set: return disj_set lowercase_ : int = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
21
1
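The disjoint-set class above merges by rank while tracking per-root element counts and the largest set seen so far; the dump's placeholder assignments hide the path-compression write in `get_parent`. A readable, runnable re-expression under those assumptions (class and method names are mine):

```python
class UnionFindWithCounts:
    def __init__(self, set_counts: list[int]) -> None:
        self.set_counts = list(set_counts)
        self.max_set = max(set_counts)
        self.ranks = [1] * len(set_counts)
        self.parents = list(range(len(set_counts)))

    def merge(self, src: int, dst: int) -> bool:
        src_root, dst_root = self.find(src), self.find(dst)
        if src_root == dst_root:
            return False
        if self.ranks[src_root] > self.ranks[dst_root]:
            src_root, dst_root = dst_root, src_root  # attach the lower-rank root below
        self.set_counts[dst_root] += self.set_counts[src_root]
        self.set_counts[src_root] = 0
        self.parents[src_root] = dst_root
        if self.ranks[dst_root] == self.ranks[src_root]:
            self.ranks[dst_root] += 1
        self.max_set = max(self.max_set, self.set_counts[dst_root])
        return True

    def find(self, x: int) -> int:
        if self.parents[x] != x:
            self.parents[x] = self.find(self.parents[x])  # path compression
        return self.parents[x]


uf = UnionFindWithCounts([1, 1, 1])
uf.merge(0, 1)
uf.merge(1, 2)
print(uf.max_set)  # 3
```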
'''simple docstring''' import re def lowerCamelCase ( UpperCAmelCase__ : str ) -> bool: lowercase_ : Any = re.compile( R"""^(?:0|94|\+94|0{2}94)""" R"""7(0|1|2|4|5|6|7|8)""" R"""(-| |)""" R"""\d{7}$""" ) return bool(re.search(UpperCAmelCase__ , UpperCAmelCase__ ) ) if __name__ == "__main__": _lowercase : Optional[int] = "0094702343221" print(is_sri_lankan_phone_number(phone))
21
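The validator accepts an optional country prefix (`0`, `94`, `+94`, or `0094`), a mobile prefix `7x` with x in {0, 1, 2, 4, 5, 6, 7, 8}, an optional `-` or space, and seven digits. A quick demonstration with the same pattern:

```python
import re

pattern = re.compile(r"^(?:0|94|\+94|0{2}94)7(0|1|2|4|5|6|7|8)(-| |)\d{7}$")

for number in ["0094702343221", "+94773283048", "0770000000", "0731234567"]:
    print(number, bool(pattern.search(number)))
# the last number fails: 73 is not in the accepted prefix list
```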
'''simple docstring''' from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING _lowercase : str = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase) class __magic_name__ ( _UpperCAmelCase): def __init__( self : str , *lowercase_ : int , **lowercase_ : Any ): super().__init__(*lowercase_ , **lowercase_ ) requires_backends(self , """decord""" ) self.check_model_type(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : Union[str, Any]=None , lowercase_ : List[Any]=None ): lowercase_ : Union[str, Any] = {} if frame_sampling_rate is not None: lowercase_ : Any = frame_sampling_rate if num_frames is not None: lowercase_ : Optional[Any] = num_frames lowercase_ : Union[str, Any] = {} if top_k is not None: lowercase_ : Optional[Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self : str , lowercase_ : Union[str, List[str]] , **lowercase_ : str ): return super().__call__(lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=None , lowercase_ : Optional[int]=1 ): if num_frames is None: lowercase_ : List[Any] = self.model.config.num_frames if video.startswith("""http://""" ) or video.startswith("""https://""" ): lowercase_ : Union[str, Any] = BytesIO(requests.get(lowercase_ ).content ) lowercase_ : Optional[Any] = VideoReader(lowercase_ ) videoreader.seek(0 ) lowercase_ : Tuple = 0 lowercase_ : List[Any] = num_frames * frame_sampling_rate - 1 lowercase_ : Optional[int] = np.linspace(lowercase_ , lowercase_ , num=lowercase_ , dtype=np.intaa ) lowercase_ : Optional[int] = videoreader.get_batch(lowercase_ ).asnumpy() lowercase_ : Union[str, Any] = list(lowercase_ ) lowercase_ : Optional[Any] = self.image_processor(lowercase_ , return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : str ): lowercase_ : int = self.model(**lowercase_ ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[Any] , lowercase_ : Dict=5 ): if top_k > self.model.config.num_labels: lowercase_ : List[Any] = self.model.config.num_labels if self.framework == "pt": lowercase_ : str = model_outputs.logits.softmax(-1 )[0] lowercase_ , lowercase_ : Optional[Any] = probs.topk(lowercase_ ) else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) lowercase_ : Union[str, Any] = scores.tolist() lowercase_ : Tuple = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
21
1
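The video preprocessing above samples `num_frames` evenly spaced indices across a window of `num_frames * frame_sampling_rate` frames before batching them through the image processor. The index computation in isolation:

```python
import numpy as np

num_frames, frame_sampling_rate = 8, 4
start_idx, end_idx = 0, num_frames * frame_sampling_rate - 1  # inclusive endpoint
indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
print(indices)  # [ 0  4  8 13 17 22 26 31]
```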
'''simple docstring''' import numpy as np import qiskit def lowerCamelCase ( UpperCAmelCase__ : int = 8 , UpperCAmelCase__ : int | None = None ) -> str: lowercase_ : Tuple = np.random.default_rng(seed=UpperCAmelCase__ ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. lowercase_ : Dict = 6 * key_len # Measurement basis for Alice's qubits. lowercase_ : int = rng.integers(2 , size=UpperCAmelCase__ ) # The set of states Alice will prepare. lowercase_ : Dict = rng.integers(2 , size=UpperCAmelCase__ ) # Measurement basis for Bob's qubits. lowercase_ : Tuple = rng.integers(2 , size=UpperCAmelCase__ ) # Quantum Circuit to simulate BB84 lowercase_ : int = qiskit.QuantumCircuit(UpperCAmelCase__ , name="""BB84""" ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(UpperCAmelCase__ ): if alice_state[index] == 1: bbaa_circ.x(UpperCAmelCase__ ) if alice_basis[index] == 1: bbaa_circ.h(UpperCAmelCase__ ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(UpperCAmelCase__ ): if bob_basis[index] == 1: bbaa_circ.h(UpperCAmelCase__ ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. lowercase_ : int = qiskit.Aer.get_backend("""aer_simulator""" ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. lowercase_ : List[str] = qiskit.execute(UpperCAmelCase__ , UpperCAmelCase__ , shots=1 , seed_simulator=UpperCAmelCase__ ) # Returns the result of measurement. lowercase_ : Any = job.result().get_counts(UpperCAmelCase__ ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. lowercase_ : Union[str, Any] = """""".join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. lowercase_ : Union[str, Any] = gen_key[:key_len] if len(UpperCAmelCase__ ) >= key_len else gen_key.ljust(UpperCAmelCase__ , """0""" ) return key if __name__ == "__main__": print(f"""The generated key is : {bbaa(8, seed=0)}""") from doctest import testmod testmod()
21
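After measurement, BB84 keeps only the bit positions where Alice's preparation basis matches Bob's measurement basis, which is why the circuit oversamples (6x the key length) before truncating or padding. The sifting step in isolation, with random stand-ins for the measured bits:

```python
import numpy as np

rng = np.random.default_rng(seed=0)
alice_basis = rng.integers(2, size=16)
bob_basis = rng.integers(2, size=16)
measured_bits = rng.integers(2, size=16)  # stand-in for the circuit's measurement result

sifted_key = "".join(
    str(bit) for a, b, bit in zip(alice_basis, bob_basis, measured_bits) if a == b
)
print(sifted_key)  # roughly half of the positions survive on average
```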
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def lowerCamelCase ( UpperCAmelCase__ : Optional[int] ) -> List[str]: if isinstance(UpperCAmelCase__ , collections.abc.Iterable ): return x return (x, x) @require_flax class __magic_name__ : def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any , lowercase_ : str ): pass def SCREAMING_SNAKE_CASE_ ( self : str ): pass def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): pass def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float ): lowercase_ : Optional[Any] = np.abs((a - b) ).max() self.assertLessEqual(lowercase_ , lowercase_ , f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Tuple=None , **lowercase_ : Optional[int] ): lowercase_ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : Any = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : List[Any]=None , **lowercase_ : Tuple ): lowercase_ , lowercase_ : Any = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Optional[int] = {"""vision_model""": vision_model, """text_model""": text_model} lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=None , **lowercase_ : int ): lowercase_ , lowercase_ : Union[str, Any] = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Optional[Any] = {"""vision_model""": vision_model, 
"""text_model""": text_model} lowercase_ : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : Tuple = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) lowercase_ : Any = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ ) lowercase_ : List[str] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) lowercase_ : Union[str, Any] = after_output[0] lowercase_ : str = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Dict=None , **lowercase_ : Optional[Any] ): lowercase_ , lowercase_ : Optional[int] = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Dict = {"""vision_model""": vision_model, """text_model""": text_model} lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : Optional[int] = model( input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ ) lowercase_ : Tuple = output.vision_model_output.attentions self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowercase_ : List[str] = to_atuple(vision_model.config.image_size ) lowercase_ : Optional[Any] = to_atuple(vision_model.config.patch_size ) lowercase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowercase_ : Optional[Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowercase_ : Union[str, Any] = output.text_model_output.attentions self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int ): pt_model.to(lowercase_ ) pt_model.eval() # prepare inputs lowercase_ : int = inputs_dict lowercase_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): lowercase_ : str = pt_model(**lowercase_ ).to_tuple() lowercase_ : Optional[Any] = fx_model(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowercase_ ) lowercase_ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ , from_pt=lowercase_ ) lowercase_ : Dict = fx_model_loaded(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowercase_ ) lowercase_ : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(lowercase_ , from_flax=lowercase_ 
) pt_model_loaded.to(lowercase_ ) pt_model_loaded.eval() with torch.no_grad(): lowercase_ : List[Any] = pt_model_loaded(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(lowercase_ , pt_output_loaded.numpy() , 4E-2 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Union[str, Any] ): lowercase_ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : List[Any] = VisionTextDualEncoderModel(lowercase_ ) lowercase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_ ) lowercase_ : Tuple = fx_state self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] ): lowercase_ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : int = VisionTextDualEncoderModel(lowercase_ ) lowercase_ : Dict = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : Optional[Any] = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params ) self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Tuple = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = self.prepare_config_and_inputs() self.check_save_load(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowercase_ ) @is_pt_flax_cross_test def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = self.prepare_config_and_inputs() lowercase_ : List[Any] = config_inputs_dict.pop("""vision_config""" ) lowercase_ : int = config_inputs_dict.pop("""text_config""" ) lowercase_ : Optional[int] = config_inputs_dict self.check_equivalence_pt_to_flax(lowercase_ , lowercase_ , lowercase_ ) self.check_equivalence_flax_to_pt(lowercase_ , lowercase_ , lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ , lowercase_ : str = self.get_pretrained_model_and_inputs() lowercase_ : Dict = model_a(**lowercase_ ) lowercase_ : str = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ ) lowercase_ : str = model_a(**lowercase_ ) lowercase_ : Union[str, Any] = after_outputs[0] lowercase_ : Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-5 ) @require_flax class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , ) lowercase_ : List[str] = 13 lowercase_ : Optional[Any] = floats_tensor( [ batch_size, 
model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowercase_ : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowercase_ : str = random_attention_mask([batch_size, 4] ) lowercase_ : List[str] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Tuple ): lowercase_ : Union[str, Any] = FlaxViTModel(lowercase_ ) lowercase_ : Dict = FlaxBertModel(lowercase_ ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Any = FlaxViTModelTester(self ) lowercase_ : Optional[Any] = FlaxBertModelTester(self ) lowercase_ : Dict = vit_model_tester.prepare_config_and_inputs() lowercase_ : Optional[Any] = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ : List[str] = vision_config_and_inputs lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , ) lowercase_ : List[str] = 13 lowercase_ : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowercase_ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowercase_ : Tuple = random_attention_mask([batch_size, 4] ) lowercase_ : Union[str, Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ): lowercase_ : Tuple = FlaxCLIPVisionModel(lowercase_ ) lowercase_ : Any = FlaxBertModel(lowercase_ ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Union[str, Any] = FlaxCLIPVisionModelTester(self ) lowercase_ : Tuple = FlaxBertModelTester(self ) lowercase_ : Union[str, Any] = clip_model_tester.prepare_config_and_inputs() lowercase_ : Any = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ : Optional[Any] = vision_config_and_inputs lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 ) lowercase_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) lowercase_ : Union[str, Any] = 
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) lowercase_ : Optional[int] = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=lowercase_ , padding=lowercase_ , return_tensors="""np""" ) lowercase_ : List[str] = model(**lowercase_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) lowercase_ : Optional[Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] ) self.assertTrue(np.allclose(outputs.logits_per_image , lowercase_ , atol=1E-3 ) )
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class __magic_name__ ( nn.Module): UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 0.0 UpperCamelCase__ = 1 UpperCamelCase__ = 1 UpperCamelCase__ = True UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = jnp.floataa def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : int = [] lowercase_ : List[Any] = [] for i in range(self.num_layers ): lowercase_ : str = self.in_channels if i == 0 else self.out_channels lowercase_ : int = FlaxResnetBlockaD( in_channels=lowercase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowercase_ ) lowercase_ : Optional[int] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowercase_ ) lowercase_ : Dict = resnets lowercase_ : List[str] = attentions if self.add_downsample: lowercase_ : Optional[int] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : int , lowercase_ : str , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any]=True ): lowercase_ : str = () for resnet, attn in zip(self.resnets , self.attentions ): lowercase_ : Any = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ ) lowercase_ : Optional[int] = attn(lowercase_ , lowercase_ , deterministic=lowercase_ ) output_states += (hidden_states,) if self.add_downsample: lowercase_ : List[str] = self.downsamplers_a(lowercase_ ) output_states += (hidden_states,) return hidden_states, output_states class __magic_name__ ( nn.Module): UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 0.0 UpperCamelCase__ = 1 UpperCamelCase__ = True UpperCamelCase__ = jnp.floataa def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Tuple = [] for i in range(self.num_layers ): lowercase_ : Tuple = self.in_channels if i == 0 else self.out_channels lowercase_ : Dict = FlaxResnetBlockaD( in_channels=lowercase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowercase_ ) lowercase_ : Dict = resnets if self.add_downsample: lowercase_ : Optional[int] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Dict , lowercase_ : int , lowercase_ : Dict , lowercase_ : Tuple=True ): lowercase_ : Optional[Any] = () for resnet in self.resnets: lowercase_ : Union[str, Any] = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ ) output_states += (hidden_states,) if self.add_downsample: lowercase_ : Tuple = self.downsamplers_a(lowercase_ ) output_states += (hidden_states,) return hidden_states, output_states class __magic_name__ ( nn.Module): UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 0.0 UpperCamelCase__ = 1 UpperCamelCase__ = 1 UpperCamelCase__ = True UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = jnp.floataa def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : List[Any] = [] lowercase_ : List[Any] = [] for i in range(self.num_layers ): lowercase_ : Tuple = self.in_channels 
if (i == self.num_layers - 1) else self.out_channels lowercase_ : List[Any] = self.prev_output_channel if i == 0 else self.out_channels lowercase_ : str = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowercase_ ) lowercase_ : Any = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowercase_ ) lowercase_ : int = resnets lowercase_ : Tuple = attentions if self.add_upsample: lowercase_ : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Dict , lowercase_ : str , lowercase_ : int , lowercase_ : int , lowercase_ : Tuple , lowercase_ : int=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states lowercase_ : str = res_hidden_states_tuple[-1] lowercase_ : Optional[int] = res_hidden_states_tuple[:-1] lowercase_ : Optional[int] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) lowercase_ : Optional[int] = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ ) lowercase_ : List[str] = attn(lowercase_ , lowercase_ , deterministic=lowercase_ ) if self.add_upsample: lowercase_ : int = self.upsamplers_a(lowercase_ ) return hidden_states class __magic_name__ ( nn.Module): UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 0.0 UpperCamelCase__ = 1 UpperCamelCase__ = True UpperCamelCase__ = jnp.floataa def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Dict = [] for i in range(self.num_layers ): lowercase_ : Optional[int] = self.in_channels if (i == self.num_layers - 1) else self.out_channels lowercase_ : int = self.prev_output_channel if i == 0 else self.out_channels lowercase_ : Any = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowercase_ ) lowercase_ : int = resnets if self.add_upsample: lowercase_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any]=True ): for resnet in self.resnets: # pop res hidden states lowercase_ : int = res_hidden_states_tuple[-1] lowercase_ : List[str] = res_hidden_states_tuple[:-1] lowercase_ : int = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) lowercase_ : Union[str, Any] = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ ) if self.add_upsample: lowercase_ : Dict = self.upsamplers_a(lowercase_ ) return hidden_states class __magic_name__ ( nn.Module): UpperCamelCase__ = 42 UpperCamelCase__ = 0.0 UpperCamelCase__ = 1 UpperCamelCase__ = 1 UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = jnp.floataa def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): # there is always at least one resnet lowercase_ : List[str] = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] lowercase_ : List[Any] = [] for _ in range(self.num_layers ): lowercase_ : str = FlaxTransformeraDModel( 
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowercase_ ) lowercase_ : Union[str, Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowercase_ ) lowercase_ : Optional[Any] = resnets lowercase_ : Optional[int] = attentions def __call__( self : Optional[Any] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : str=True ): lowercase_ : Optional[int] = self.resnets[0](lowercase_ , lowercase_ ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): lowercase_ : List[Any] = attn(lowercase_ , lowercase_ , deterministic=lowercase_ ) lowercase_ : str = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ ) return hidden_states
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class __magic_name__ ( unittest.TestCase): def __init__( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : int=7 , lowercase_ : Optional[Any]=3 , lowercase_ : Optional[Any]=18 , lowercase_ : List[Any]=30 , lowercase_ : int=400 , lowercase_ : Dict=True , lowercase_ : List[Any]=None , lowercase_ : Dict=True , ): lowercase_ : Tuple = size if size is not None else {"""height""": 18, """width""": 18} lowercase_ : List[str] = parent lowercase_ : Any = batch_size lowercase_ : Optional[Any] = num_channels lowercase_ : Tuple = image_size lowercase_ : Optional[Any] = min_resolution lowercase_ : Dict = max_resolution lowercase_ : Optional[int] = do_resize lowercase_ : Optional[Any] = size lowercase_ : Union[str, Any] = do_normalize def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = ImageGPTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = ImageGPTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase_ , """clusters""" ) ) self.assertTrue(hasattr(lowercase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowercase_ , """size""" ) ) self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) lowercase_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : int = self.image_processing_class(**self.image_processor_dict ) lowercase_ : Union[str, Any] = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , obj[key] ) ) else: self.assertEqual(obj[key] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : str = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase_ : Union[str, Any] = os.path.join(lowercase_ , """image_processor.json""" ) image_processor_first.to_json_file(lowercase_ ) lowercase_ : Optional[Any] = self.image_processing_class.from_json_file(lowercase_ 
).to_dict() lowercase_ : Any = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(lowercase_ ) lowercase_ : Any = self.image_processing_class.from_pretrained(lowercase_ ).to_dict() lowercase_ : List[str] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowercase_ ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def SCREAMING_SNAKE_CASE_ ( self : Any ): pass def lowerCamelCase ( ) -> Any: lowercase_ : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" ) lowercase_ : Any = Image.open(dataset[4]["""file"""] ) lowercase_ : Dict = Image.open(dataset[5]["""file"""] ) lowercase_ : int = [imagea, imagea] return images @require_vision @require_torch class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Optional[Any] = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) lowercase_ : Optional[int] = prepare_images() # test non-batched lowercase_ : str = image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1024) ) lowercase_ : Tuple = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase_ ) # test batched lowercase_ : List[str] = image_processing(lowercase_ , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1024) ) lowercase_ : Union[str, Any] = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase_ )
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _lowercase : str = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") @dataclass class __magic_name__ : UpperCamelCase__ = field( default='''cifar10''', metadata={'''help''': '''Name of a dataset from the datasets package'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''The column name of the images in the files.'''}) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''A folder containing the training data.'''}) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''A folder containing the validation data.'''}) UpperCamelCase__ = field( default=0.15, metadata={'''help''': '''Percent to split off of train for validation.'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) }, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) }, ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : int = {} if self.train_dir is not None: lowercase_ : int = self.train_dir if self.validation_dir is not None: lowercase_ : Optional[int] = self.validation_dir lowercase_ : Optional[Any] = data_files if data_files else None @dataclass class __magic_name__ : UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) }, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. 
Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) }, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''}) UpperCamelCase__ = field( default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, ) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Name or path of preprocessor config.'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) }, ) UpperCamelCase__ = field( default=0.75, metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''}) @dataclass class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = field( default=1e-3, metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''}) def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> Optional[Any]: lowercase_ : List[Any] = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def lowerCamelCase ( ) -> List[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase_ : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase_ , lowercase_ , lowercase_ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase_ , lowercase_ , lowercase_ : Tuple = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , UpperCAmelCase__ , UpperCAmelCase__ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowercase_ : str = training_args.get_process_log_level() logger.setLevel(UpperCAmelCase__ ) transformers.utils.logging.set_verbosity(UpperCAmelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. 
lowercase_ : str = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowercase_ : int = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. lowercase_ : Union[str, Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. lowercase_ : List[str] = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , UpperCAmelCase__ ) and data_args.train_val_split > 0.0: lowercase_ : int = ds["""train"""].train_test_split(data_args.train_val_split ) lowercase_ : Union[str, Any] = split["""train"""] lowercase_ : Tuple = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase_ : Any = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: lowercase_ : Any = ViTMAEConfig.from_pretrained(model_args.config_name , **UpperCAmelCase__ ) elif model_args.model_name_or_path: lowercase_ : List[str] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ ) else: lowercase_ : Dict = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: lowercase_ : Union[str, Any] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCAmelCase__ ) elif model_args.model_name_or_path: lowercase_ : str = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ ) else: lowercase_ : List[str] = ViTImageProcessor() # create model if model_args.model_name_or_path: lowercase_ : str = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) lowercase_ : Optional[int] = ViTMAEForPreTraining(UpperCAmelCase__ ) if training_args.do_train: lowercase_ : Union[str, Any] = ds["""train"""].column_names else: lowercase_ : str = ds["""validation"""].column_names if data_args.image_column_name is not None: 
lowercase_ : Optional[int] = data_args.image_column_name elif "image" in column_names: lowercase_ : Tuple = """image""" elif "img" in column_names: lowercase_ : int = """img""" else: lowercase_ : List[Any] = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: lowercase_ : Union[str, Any] = image_processor.size["""shortest_edge"""] else: lowercase_ : str = (image_processor.size["""height"""], image_processor.size["""width"""]) lowercase_ : List[str] = Compose( [ Lambda(lambda UpperCAmelCase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(UpperCAmelCase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(UpperCAmelCase__ : List[str] ): lowercase_ : int = [transforms(UpperCAmelCase__ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: lowercase_ : Optional[Any] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(UpperCAmelCase__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: lowercase_ : Union[str, Any] = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(UpperCAmelCase__ ) # Compute absolute learning rate lowercase_ : Union[str, Any] = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: lowercase_ : Union[str, Any] = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer lowercase_ : int = Trainer( model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , ) # Training if training_args.do_train: lowercase_ : Union[str, Any] = None if training_args.resume_from_checkpoint is not None: lowercase_ : Optional[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase_ : List[Any] = last_checkpoint lowercase_ : str = trainer.train(resume_from_checkpoint=UpperCAmelCase__ ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: lowercase_ : int = trainer.evaluate() trainer.log_metrics("""eval""" , UpperCAmelCase__ ) trainer.save_metrics("""eval""" , UpperCAmelCase__ ) # Write model card and (optionally) push to hub lowercase_ : Union[str, Any] = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: trainer.push_to_hub(**UpperCAmelCase__ ) else: trainer.create_model_card(**UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : Any ) -> str: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
'''simple docstring'''
def solution() -> int:
    """Project Euler 40: return the product d(1) * d(10) * d(100) * ... * d(1000000)
    of the digits of Champernowne's constant 0.123456789101112..."""
    digits: list = []
    digit_count = 0
    num = 1
    # concatenate the decimal expansions of 1, 2, 3, ... until at least
    # one million digits are available
    while digit_count < 10**6:
        block = str(num)
        digits.append(block)
        digit_count += len(block)
        num += 1
    constant = "".join(digits)
    # d(n) is the n-th digit after the decimal point, i.e. constant[n - 1]
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
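# A memory-lighter alternative (an illustrative sketch, not part of the
# original solution): instead of materialising a million-character string,
# d(n) can be located arithmetically by skipping whole blocks of k-digit
# numbers. `champernowne_digit` is a hypothetical helper name.


def champernowne_digit(n: int) -> int:
    """Return d(n), the n-th digit (1-indexed) of Champernowne's constant."""
    length = 1  # digits per number in the current block (1-digit, 2-digit, ...)
    count = 9  # how many numbers the current block contains
    start = 1  # first number of the current block
    while n > length * count:
        n -= length * count
        length += 1
        count *= 10
        start *= 10
    number = start + (n - 1) // length  # the number that contains digit n
    return int(str(number)[(n - 1) % length])


# agrees with the string-based solution on the first few positions
assert all(champernowne_digit(i) == int("123456789101112"[i - 1]) for i in range(1, 16))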
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
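# Illustrative usage of the re-exported pipelines (a sketch; the checkpoint
# names are examples, not something this module prescribes):
#
#     from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
#
#     controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#     pipe = StableDiffusionControlNetPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", controlnet=controlnet
#     )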
'''simple docstring'''
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
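# Illustrative usage (a sketch; "harmonai/maestro-150k" is an example
# checkpoint, not mandated by this file):
#
#     from diffusers import DanceDiffusionPipeline
#
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     audio = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios[0]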
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
'''simple docstring''' import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py _lowercase : Union[str, Any] = "src/transformers" _lowercase : str = "docs/source/en" _lowercase : Union[str, Any] = "." def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> int: with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowercase_ : Union[str, Any] = f.readlines() # Find the start prompt. lowercase_ : Optional[Any] = 0 while not lines[start_index].startswith(UpperCAmelCase__ ): start_index += 1 start_index += 1 lowercase_ : int = start_index while not lines[end_index].startswith(UpperCAmelCase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | _lowercase : int = "Model|Encoder|Decoder|ForConditionalGeneration" # Regexes that match TF/Flax/PT model names. _lowercase : str = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") _lowercase : Optional[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. _lowercase : int = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # This is to make sure the transformers module imported is the one in the repo. _lowercase : Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH) def lowerCamelCase ( UpperCAmelCase__ : int ) -> Any: lowercase_ : str = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , UpperCAmelCase__ ) return [m.group(0 ) for m in matches] def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> List[Any]: lowercase_ : Dict = 2 if text == """✅""" or text == """❌""" else len(UpperCAmelCase__ ) lowercase_ : List[str] = (width - text_length) // 2 lowercase_ : Dict = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def lowerCamelCase ( ) -> Any: lowercase_ : int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES lowercase_ : Any = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } lowercase_ : int = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. lowercase_ : List[Any] = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : List[str] = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Any = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Tuple = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Optional[int] = collections.defaultdict(UpperCAmelCase__ ) # Let's lookup through all transformers object (once). 
for attr_name in dir(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = None if attr_name.endswith("""Tokenizer""" ): lowercase_ : Optional[int] = slow_tokenizers lowercase_ : Union[str, Any] = attr_name[:-9] elif attr_name.endswith("""TokenizerFast""" ): lowercase_ : Optional[Any] = fast_tokenizers lowercase_ : Dict = attr_name[:-13] elif _re_tf_models.match(UpperCAmelCase__ ) is not None: lowercase_ : str = tf_models lowercase_ : str = _re_tf_models.match(UpperCAmelCase__ ).groups()[0] elif _re_flax_models.match(UpperCAmelCase__ ) is not None: lowercase_ : List[str] = flax_models lowercase_ : int = _re_flax_models.match(UpperCAmelCase__ ).groups()[0] elif _re_pt_models.match(UpperCAmelCase__ ) is not None: lowercase_ : Tuple = pt_models lowercase_ : Optional[int] = _re_pt_models.match(UpperCAmelCase__ ).groups()[0] if lookup_dict is not None: while len(UpperCAmelCase__ ) > 0: if attr_name in model_name_to_prefix.values(): lowercase_ : int = True break # Try again after removing the last word in the name lowercase_ : Optional[Any] = """""".join(camel_case_split(UpperCAmelCase__ )[:-1] ) # Let's build that table! lowercase_ : Dict = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) lowercase_ : Optional[Any] = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). lowercase_ : Union[str, Any] = [len(UpperCAmelCase__ ) + 2 for c in columns] lowercase_ : int = max([len(UpperCAmelCase__ ) for name in model_names] ) + 2 # Build the table per se lowercase_ : Tuple = """|""" + """|""".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for c, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + """|\n""" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n" lowercase_ : int = {True: """✅""", False: """❌"""} for name in model_names: lowercase_ : str = model_name_to_prefix[name] lowercase_ : Any = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for l, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + "|\n" return table def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any]=False ) -> str: lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = _find_text_in_file( filename=os.path.join(UpperCAmelCase__ , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , ) lowercase_ : Dict = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(UpperCAmelCase__ , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" ) if __name__ == "__main__": _lowercase : Any = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") _lowercase : Optional[Any] = parser.parse_args() check_model_table(args.fix_and_overwrite)
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
# ctypes is stdlib on every platform; importing it unconditionally lets
# CursorInfo be defined on POSIX as well.
import ctypes
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import msvcrt  # noqa


class CursorInfo(ctypes.Structure):
    # _fields_ is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the terminal cursor and restores it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
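# Illustrative usage of the `hide` context manager above (a minimal sketch,
# not part of the original module): the cursor stays hidden for the duration
# of the block and is restored even if an exception is raised.
if __name__ == "__main__":
    import time

    with hide():
        for step in range(3):
            print(f"working... step {step}")
            time.sleep(0.2)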
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
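# The hand-rolled `cache` dict above is equivalent to memoising with
# `functools.lru_cache`; a minimal sketch of that variant (an illustrative
# rewrite, not part of the original solution):
from functools import lru_cache


@lru_cache(maxsize=None)
def _calculate_cached(days: int, absent: int, late: int) -> int:
    if late == 3 or absent == 2:
        return 0
    if days == 0:
        return 1
    return (
        _calculate_cached(days - 1, absent, late + 1)  # late today
        + _calculate_cached(days - 1, absent + 1, 0)  # absent today
        + _calculate_cached(days - 1, absent, 0)  # on time today
    )


assert _calculate_cached(30, 0, 0) == solution()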
'''simple docstring'''

from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Map deprecated `no_*` flags onto their positive counterparts.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
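# A hedged construction sketch for the dataclass above. `models`, `batch_sizes`
# and `sequence_lengths` come from the parent BenchmarkArguments; the benchmark
# utilities are deprecated upstream, so treat this as illustrative only.
from transformers import PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[1], sequence_lengths=[8]
)
print(args.device, args.n_gpu)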
'''simple docstring''' import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 _lowercase : Any = get_tests_dir("fixtures/dummy_feature_extractor_config.json") _lowercase : Tuple = get_tests_dir("fixtures/vocab.json") _lowercase : Optional[int] = get_tests_dir("fixtures") class __magic_name__ ( unittest.TestCase): UpperCamelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Union[str, Any] = 0 def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : int = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase_ : int = WavaVecaConfig() lowercase_ : str = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" ) # save in new folder model_config.save_pretrained(lowercase_ ) processor.save_pretrained(lowercase_ ) lowercase_ : Tuple = AutoProcessor.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : int ): with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(lowercase_ , os.path.join(lowercase_ , lowercase_ ) ) copyfile(lowercase_ , os.path.join(lowercase_ , """vocab.json""" ) ) lowercase_ : str = AutoProcessor.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase_ : List[str] = WavaVecaFeatureExtractor() lowercase_ : List[Any] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" ) lowercase_ : Optional[Any] = WavaVecaProcessor(lowercase_ , lowercase_ ) # save in new folder processor.save_pretrained(lowercase_ ) # drop `processor_class` in tokenizer with open(os.path.join(lowercase_ , lowercase_ ) , """r""" ) as f: lowercase_ : List[Any] = json.load(lowercase_ ) config_dict.pop("""processor_class""" ) with open(os.path.join(lowercase_ , lowercase_ ) , """w""" ) as f: f.write(json.dumps(lowercase_ ) ) lowercase_ : Optional[int] = AutoProcessor.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase_ : Tuple = WavaVecaFeatureExtractor() lowercase_ : List[Any] = 
AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" ) lowercase_ : Tuple = WavaVecaProcessor(lowercase_ , lowercase_ ) # save in new folder processor.save_pretrained(lowercase_ ) # drop `processor_class` in feature extractor with open(os.path.join(lowercase_ , lowercase_ ) , """r""" ) as f: lowercase_ : Optional[Any] = json.load(lowercase_ ) config_dict.pop("""processor_class""" ) with open(os.path.join(lowercase_ , lowercase_ ) , """w""" ) as f: f.write(json.dumps(lowercase_ ) ) lowercase_ : Any = AutoProcessor.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase_ : Any = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" ) model_config.save_pretrained(lowercase_ ) # copy relevant files copyfile(lowercase_ , os.path.join(lowercase_ , """vocab.json""" ) ) # create emtpy sample processor with open(os.path.join(lowercase_ , lowercase_ ) , """w""" ) as f: f.write("""{}""" ) lowercase_ : Any = AutoProcessor.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowercase_ ): lowercase_ : List[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowercase_ ): lowercase_ : Optional[Any] = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=lowercase_ ) lowercase_ : Optional[int] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=lowercase_ ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) lowercase_ : Optional[Any] = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) lowercase_ : Optional[int] = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version lowercase_ : Optional[Any] = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=lowercase_ , use_fast=lowercase_ ) lowercase_ : List[Any] = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): try: AutoConfig.register("""custom""" , lowercase_ ) AutoFeatureExtractor.register(lowercase_ , lowercase_ ) AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ ) AutoProcessor.register(lowercase_ , lowercase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowercase_ ): AutoProcessor.register(lowercase_ , lowercase_ ) # Now that the config is registered, it can be used as any other config with the auto-API lowercase_ : str = CustomFeatureExtractor.from_pretrained(lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Optional[int] = os.path.join(lowercase_ , """vocab.txt""" ) with open(lowercase_ , """w""" , 
encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) lowercase_ : Any = CustomTokenizer(lowercase_ ) lowercase_ : Tuple = CustomProcessor(lowercase_ , lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(lowercase_ ) lowercase_ : Any = AutoProcessor.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = False class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = False class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''AutoFeatureExtractor''' UpperCamelCase__ = '''AutoTokenizer''' UpperCamelCase__ = False try: AutoConfig.register("""custom""" , lowercase_ ) AutoFeatureExtractor.register(lowercase_ , lowercase_ ) AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ ) AutoProcessor.register(lowercase_ , lowercase_ ) # If remote code is not set, the default is to use local classes. lowercase_ : Optional[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. lowercase_ : List[str] = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=lowercase_ ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. 
lowercase_ : int = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=lowercase_ ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Optional[int] = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" ) self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" ) @is_staging_test class __magic_name__ ( unittest.TestCase): UpperCamelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] ): lowercase_ : int = TOKEN HfFolder.save_token(lowercase_ ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Dict ): try: delete_repo(token=cls._token , repo_id="""test-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" ) except HTTPError: pass def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : int = WavaVecaProcessor.from_pretrained(lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(lowercase_ , """test-processor""" ) , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : str = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(lowercase_ , getattr(new_processor.feature_extractor , lowercase_ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Any = WavaVecaProcessor.from_pretrained(lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(lowercase_ , """test-processor-org""" ) , push_to_hub=lowercase_ , use_auth_token=self._token , organization="""valid_org""" , ) lowercase_ : int = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(lowercase_ , getattr(new_processor.feature_extractor , lowercase_ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() lowercase_ : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Optional[int] = os.path.join(lowercase_ , 
"""vocab.txt""" ) with open(lowercase_ , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) lowercase_ : int = CustomTokenizer(lowercase_ ) lowercase_ : Any = CustomProcessor(lowercase_ , lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token ) lowercase_ : Optional[int] = Repository(lowercase_ , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token ) processor.save_pretrained(lowercase_ ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { """AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""", """AutoProcessor""": """custom_processing.CustomProcessor""", } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(lowercase_ , """tokenizer_config.json""" ) ) as f: lowercase_ : Dict = json.load(lowercase_ ) self.assertDictEqual( tokenizer_config["""auto_map"""] , { """AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None], """AutoProcessor""": """custom_processing.CustomProcessor""", } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(lowercase_ , """custom_feature_extraction.py""" ) ) ) self.assertTrue(os.path.isfile(os.path.join(lowercase_ , """custom_tokenization.py""" ) ) ) self.assertTrue(os.path.isfile(os.path.join(lowercase_ , """custom_processing.py""" ) ) ) repo.push_to_hub() lowercase_ : Optional[int] = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=lowercase_ ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
'''simple docstring'''

from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated toward zero, as in C
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
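# Worked examples for the evaluator above: "2 1 + 3 *" is the postfix form of
# (2 + 1) * 3, and division truncates toward zero (15 / (7 - (1 + 1)) = 3).
if __name__ == "__main__":
    assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9
    assert evaluate_postfix(["15", "7", "1", "1", "+", "-", "/"]) == 3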
'''simple docstring'''

import mpmath  # for roots of unity
import numpy as np


class FFT:
    """Fast polynomial multiplication via the fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete Fourier transform of A or B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # multiply the DFTs of A and B and recover A*B via the inverse DFT
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol])
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
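# A worked example for the class above:
# (1 + 2x + 3x^2) * (1 + x) = 1 + 3x + 5x^2 + 3x^3.
if __name__ == "__main__":
    print(FFT([1, 2, 3], [1, 1]))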
'''simple docstring''' from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _lowercase : List[Any] = logging.get_logger(__name__) def lowerCamelCase ( UpperCAmelCase__ : Union[tf.Tensor, np.ndarray] ) -> List[int]: if isinstance(UpperCAmelCase__ , np.ndarray ): return list(tensor.shape ) lowercase_ : Tuple = tf.shape(UpperCAmelCase__ ) if tensor.shape == tf.TensorShape(UpperCAmelCase__ ): return dynamic lowercase_ : Dict = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase__ )] def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[str] = None ) -> tf.Tensor: return tf.nn.softmax(logits=logits + 1e-9 , axis=UpperCAmelCase__ , name=UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=1e-5 , UpperCAmelCase__ : List[str]=-1 ) -> List[str]: # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" ) # Get mean and variance on the axis to be normalized lowercase_ , lowercase_ : List[str] = tf.nn.moments(UpperCAmelCase__ , axes=[axis] , keepdims=UpperCAmelCase__ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowercase_ : List[Any] = [1] * inputs.shape.rank lowercase_ : List[str] = shape_list(UpperCAmelCase__ )[axis] lowercase_ : List[str] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : List[Any] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) # Compute layer normalization using the batch_normalization # function. lowercase_ : str = tf.nn.batch_normalization( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , offset=UpperCAmelCase__ , scale=UpperCAmelCase__ , variance_epsilon=UpperCAmelCase__ , ) return outputs def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Any=-1 ) -> Dict: # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowercase_ : List[Any] = tf.shape(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowercase_ : Dict = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor ) -> tf.Tensor: if not isinstance(UpperCAmelCase__ , tf.Tensor ): lowercase_ : List[Any] = tf.convert_to_tensor(UpperCAmelCase__ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowercase_ : Any = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowercase_ : List[Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowercase_ : Optional[Any] = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : int , UpperCAmelCase__ : str = "input_ids" ) -> None: tf.debugging.assert_less( UpperCAmelCase__ , tf.cast(UpperCAmelCase__ , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase__ )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Any: lowercase_ : int = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowercase_ : Optional[Any] = [x for x in data if len(UpperCAmelCase__ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( """The following attributes cannot be saved to HDF5 file because """ F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) lowercase_ : Any = np.asarray(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = chunk_data else: lowercase_ : Any = data def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ) -> str: if name in group.attrs: lowercase_ : Optional[Any] = [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs[name]] else: lowercase_ : int = [] lowercase_ : Optional[int] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] ) chunk_id += 1 return data def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> Any: def _expand_single_ad_tensor(UpperCAmelCase__ : Optional[Any] ): if isinstance(UpperCAmelCase__ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(UpperCAmelCase__ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase__ )
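# Quick checks for two of the helpers above, via their public upstream names in
# `transformers.tf_utils` (assumed to be available in the installed version).
import tensorflow as tf
from transformers.tf_utils import shape_list, stable_softmax

x = tf.zeros((2, 3))
assert shape_list(x) == [2, 3]  # static shape, returned as plain Python ints
probs = stable_softmax(tf.constant([[1.0, 2.0, 3.0]]))
assert abs(float(tf.reduce_sum(probs)) - 1.0) < 1e-6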
'''simple docstring'''

import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune in place so os.walk skips scripts/ and hidden or private dirs
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
'''simple docstring'''

from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    """Mobius function of n: -1, 1, or 0 (not square-free)."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
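# Spot checks for the function above (run from the repository root so the
# `maths` package resolves): mu(4) = 0 since 4 = 2**2 is not square-free,
# mu(11) = -1 (odd number of prime factors), mu(10) = 1 (even number).
if __name__ == "__main__":
    assert (mobius(4), mobius(11), mobius(10)) == (0, -1, 1)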
'''simple docstring'''

def solution(n: int = 1000) -> int:
    """Return the largest product a*b*c of a Pythagorean triplet with a + b + c = n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
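# For n = 1000 the only Pythagorean triplet is (200, 375, 425):
# 200**2 + 375**2 = 425**2 and 200 + 375 + 425 = 1000, so the product is
# 31875000 (Project Euler 9).
if __name__ == "__main__":
    assert solution(1000) == 31875000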
'''simple docstring'''

def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a

    count = sum(1 for x in frequency[1:limit] if x == 10)

    return count


if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring''' import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def lowerCamelCase ( UpperCAmelCase__ : ndarray ) -> float: return np.dot(UpperCAmelCase__ , UpperCAmelCase__ ) class __magic_name__ : def __init__( self : int , *, lowercase_ : float = np.inf , lowercase_ : str = "linear" , lowercase_ : float = 0.0 , ): lowercase_ : Any = regularization lowercase_ : List[str] = gamma if kernel == "linear": lowercase_ : Any = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError("""rbf kernel requires gamma""" ) if not isinstance(self.gamma , (float, int) ): raise ValueError("""gamma must be float or int""" ) if not self.gamma > 0: raise ValueError("""gamma must be > 0""" ) lowercase_ : Optional[Any] = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: lowercase_ : Any = f'''Unknown kernel: {kernel}''' raise ValueError(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : ndarray , lowercase_ : ndarray ): return np.dot(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : ndarray , lowercase_ : ndarray ): return np.exp(-(self.gamma * norm_squared(vectora - vectora )) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : list[ndarray] , lowercase_ : ndarray ): lowercase_ : Union[str, Any] = observations lowercase_ : List[Any] = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((lowercase_) , ) : List[str] = np.shape(lowercase_ ) def to_minimize(lowercase_ : ndarray ) -> float: lowercase_ : List[str] = 0 ((lowercase_) , ) : Tuple = np.shape(lowercase_ ) for i in range(lowercase_ ): for j in range(lowercase_ ): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j] ) ) return 1 / 2 * s - sum(lowercase_ ) lowercase_ : Optional[Any] = LinearConstraint(lowercase_ , 0 , 0 ) lowercase_ : Dict = Bounds(0 , self.regularization ) lowercase_ : Tuple = minimize( lowercase_ , np.ones(lowercase_ ) , bounds=lowercase_ , constraints=[ly_contraint] ).x lowercase_ : int = l_star # calculating mean offset of separation plane to points lowercase_ : List[str] = 0 for i in range(lowercase_ ): for j in range(lowercase_ ): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j] ) lowercase_ : Tuple = s / n def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : ndarray ): lowercase_ : List[str] = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , lowercase_ ) for n in range(len(self.classes ) ) ) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
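# A tiny fit/predict sketch for the classifier above, assuming it is exported
# under its upstream name `SVC` with `fit(observations, classes)` and
# `predict(observation)` methods (the name is an assumption here). Two
# well-separated classes on the x-axis; a finite regularization keeps the
# bounds passed to scipy finite.
import numpy as np

xs = [np.array([1.0, 0.0]), np.array([2.0, 0.0]), np.array([-1.0, 0.0]), np.array([-2.0, 0.0])]
ys = np.array([1, 1, -1, -1])
model = SVC(kernel="linear", regularization=10)
model.fit(xs, ys)
assert model.predict(np.array([3.0, 0.0])) == 1
assert model.predict(np.array([-3.0, 0.0])) == -1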
'''simple docstring''' import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class __magic_name__ ( unittest.TestCase): @parameterized.expand([(None,), ("""foo.json""",)] ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str ): lowercase_ : Union[str, Any] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ , config_name=lowercase_ ) lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_ ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , lowercase_ ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = AutoConfig.from_pretrained("""gpt2""" ) lowercase_ : List[Any] = GenerationConfig.from_model_config(lowercase_ ) lowercase_ : Optional[int] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(lowercase_ , lowercase_ ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = GenerationConfig() lowercase_ : int = { """max_new_tokens""": 1024, """foo""": """bar""", } lowercase_ : List[str] = copy.deepcopy(lowercase_ ) lowercase_ : Tuple = generation_config.update(**lowercase_ ) # update_kwargs was not modified (no side effects) self.assertEqual(lowercase_ , lowercase_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(lowercase_ , {"""foo""": """bar"""} ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Dict = GenerationConfig() lowercase_ : int = """bar""" with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir: generation_config.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , """bar""" ) lowercase_ : List[str] = GenerationConfig.from_model_config(lowercase_ ) assert not hasattr(lowercase_ , """foo""" ) # no new kwargs should be initialized if from config def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Optional[int] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , lowercase_ ) self.assertEqual(default_config.num_beams , 1 ) lowercase_ : Dict = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) 
self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , lowercase_ ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ ) lowercase_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , lowercase_ ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class __magic_name__ ( unittest.TestCase): @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any ): lowercase_ : int = TOKEN HfFolder.save_token(lowercase_ ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] ): try: delete_repo(token=cls._token , repo_id="""test-generation-config""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" ) except HTTPError: pass def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""test-generation-config""" , use_auth_token=self._token ) lowercase_ : List[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-generation-config""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="""test-generation-config""" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : int = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token ) lowercase_ : Optional[Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
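# The save/load round trip the tests above rely on, in miniature.
import tempfile

from transformers import GenerationConfig

cfg = GenerationConfig(do_sample=True, temperature=0.7)
with tempfile.TemporaryDirectory() as tmp_dir:
    cfg.save_pretrained(tmp_dir)
    loaded = GenerationConfig.from_pretrained(tmp_dir)
assert loaded.temperature == 0.7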
'''simple docstring'''

def merge_sort(collection: list) -> list:
    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
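# Cheap self-checks for the implementation above, covering the empty and
# single-element corner cases (they run on import).
assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert merge_sort([]) == []
assert merge_sort([7]) == [7]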
'''simple docstring'''

import argparse

import torch

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
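# Example invocation of the script above (all paths are placeholders, and the
# script name follows the upstream convention):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel/model.ckpt \
#       --config_file ./funnel/config.json \
#       --pytorch_dump_path ./funnel/pytorch_model.bin \
#       --base_model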
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = (UniPCMultistepScheduler,) UpperCamelCase__ = (('''num_inference_steps''', 25),) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , **lowercase_ : Union[str, Any] ): lowercase_ : Any = { """num_train_timesteps""": 1000, """beta_start""": 0.00_01, """beta_end""": 0.02, """beta_schedule""": """linear""", """solver_order""": 2, """solver_type""": """bh2""", } config.update(**lowercase_ ) return config def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[Any]=0 , **lowercase_ : List[Any] ): lowercase_ : Dict = dict(self.forward_default_kwargs ) lowercase_ : str = kwargs.pop("""num_inference_steps""" , lowercase_ ) lowercase_ : List[str] = self.dummy_sample lowercase_ : List[Any] = 0.1 * sample lowercase_ : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: lowercase_ : Optional[Any] = self.get_scheduler_config(**lowercase_ ) lowercase_ : str = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals lowercase_ : Any = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) lowercase_ : str = scheduler_class.from_pretrained(lowercase_ ) new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals lowercase_ : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order] lowercase_ , lowercase_ : str = sample, sample for t in range(lowercase_ , time_step + scheduler.config.solver_order + 1 ): lowercase_ : Any = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample lowercase_ : List[str] = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int]=0 , **lowercase_ : int ): lowercase_ : Optional[int] = dict(self.forward_default_kwargs ) lowercase_ : Optional[int] = kwargs.pop("""num_inference_steps""" , lowercase_ ) lowercase_ : List[str] = self.dummy_sample lowercase_ : Tuple = 0.1 * sample lowercase_ : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: lowercase_ : List[Any] = self.get_scheduler_config() lowercase_ : int = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals (must be after setting timesteps) lowercase_ : Tuple = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) lowercase_ : Tuple = scheduler_class.from_pretrained(lowercase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residual (must be after setting timesteps) lowercase_ : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order] lowercase_ : Optional[int] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample lowercase_ : Optional[Any] = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not 
identical" def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[Any]=None , **lowercase_ : Any ): if scheduler is None: lowercase_ : str = self.scheduler_classes[0] lowercase_ : str = self.get_scheduler_config(**lowercase_ ) lowercase_ : List[Any] = scheduler_class(**lowercase_ ) lowercase_ : Tuple = self.scheduler_classes[0] lowercase_ : Tuple = self.get_scheduler_config(**lowercase_ ) lowercase_ : Optional[int] = scheduler_class(**lowercase_ ) lowercase_ : Tuple = 10 lowercase_ : Any = self.dummy_model() lowercase_ : Tuple = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): lowercase_ : str = model(lowercase_ , lowercase_ ) lowercase_ : Any = scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample return sample def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Dict = dict(self.forward_default_kwargs ) lowercase_ : Union[str, Any] = kwargs.pop("""num_inference_steps""" , lowercase_ ) for scheduler_class in self.scheduler_classes: lowercase_ : List[Any] = self.get_scheduler_config() lowercase_ : List[Any] = scheduler_class(**lowercase_ ) lowercase_ : Union[str, Any] = self.dummy_sample lowercase_ : Dict = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase_ , """set_timesteps""" ): scheduler.set_timesteps(lowercase_ ) elif num_inference_steps is not None and not hasattr(lowercase_ , """set_timesteps""" ): lowercase_ : Union[str, Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowercase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10] lowercase_ : Any = dummy_past_residuals[: scheduler.config.solver_order] lowercase_ : Dict = scheduler.timesteps[5] lowercase_ : Dict = scheduler.timesteps[6] lowercase_ : int = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample lowercase_ : List[Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE_ ( self : int ): # make sure that iterating over schedulers with same config names gives same results # for defaults lowercase_ : Optional[Any] = UniPCMultistepScheduler(**self.get_scheduler_config() ) lowercase_ : Tuple = self.full_loop(scheduler=lowercase_ ) lowercase_ : List[Any] = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.24_64 ) < 1E-3 lowercase_ : List[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config ) lowercase_ : Optional[Any] = DEISMultistepScheduler.from_config(scheduler.config ) lowercase_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config ) lowercase_ : Optional[Any] = UniPCMultistepScheduler.from_config(scheduler.config ) lowercase_ : Dict = self.full_loop(scheduler=lowercase_ ) lowercase_ : Optional[int] = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.24_64 ) < 1E-3 def SCREAMING_SNAKE_CASE_ ( self : Any ): for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : int ): self.check_over_configs(thresholding=lowercase_ ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowercase_ , prediction_type=lowercase_ , sample_max_value=lowercase_ , solver_order=lowercase_ , solver_type=lowercase_ , ) def SCREAMING_SNAKE_CASE_ ( 
self : List[str] ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowercase_ , solver_type=lowercase_ , prediction_type=lowercase_ , ) lowercase_ : List[Any] = self.full_loop( solver_order=lowercase_ , solver_type=lowercase_ , prediction_type=lowercase_ , ) assert not torch.isnan(lowercase_ ).any(), "Samples have nan numbers" def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): self.check_over_configs(lower_order_final=lowercase_ ) self.check_over_configs(lower_order_final=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=lowercase_ , time_step=0 ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Dict = self.full_loop() lowercase_ : Optional[Any] = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.24_64 ) < 1E-3 def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : List[Any] = self.full_loop(prediction_type="""v_prediction""" ) lowercase_ : Tuple = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.10_14 ) < 1E-3 def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Optional[int] = self.scheduler_classes[0] lowercase_ : Optional[Any] = self.get_scheduler_config(thresholding=lowercase_ , dynamic_thresholding_ratio=0 ) lowercase_ : Any = scheduler_class(**lowercase_ ) lowercase_ : str = 10 lowercase_ : Dict = self.dummy_model() lowercase_ : List[Any] = self.dummy_sample_deter.half() scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): lowercase_ : Optional[int] = model(lowercase_ , lowercase_ ) lowercase_ : Tuple = scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample assert sample.dtype == torch.floataa def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , **lowercase_ : Tuple ): for scheduler_class in self.scheduler_classes: lowercase_ : Dict = self.get_scheduler_config(**lowercase_ ) lowercase_ : List[str] = scheduler_class(**lowercase_ ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
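# The bare denoising loop these tests exercise, with a random tensor standing
# in for a real UNet prediction (illustrative only; shapes are arbitrary).
import torch
from diffusers import UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])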
'''simple docstring''' import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _lowercase : Optional[List[str]] = None _lowercase : str = "<" if sys.byteorder == "little" else ">" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _lowercase : Optional[int] = [ np.dtype("|b1"), np.dtype("|u1"), np.dtype("<u2"), np.dtype(">u2"), np.dtype("<i2"), np.dtype(">i2"), np.dtype("<u4"), np.dtype(">u4"), np.dtype("<i4"), np.dtype(">i4"), np.dtype("<f4"), np.dtype(">f4"), np.dtype("<f8"), np.dtype(">f8"), ] @dataclass class __magic_name__ : UpperCamelCase__ = True UpperCamelCase__ = None # Automatically constructed UpperCamelCase__ = "PIL.Image.Image" UpperCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()}) UpperCamelCase__ = field(default='''Image''', init=_UpperCAmelCase, repr=_UpperCAmelCase) def __call__( self : Tuple ): return self.pa_type def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if isinstance(lowercase_ , lowercase_ ): lowercase_ : int = np.array(lowercase_ ) if isinstance(lowercase_ , lowercase_ ): return {"path": value, "bytes": None} elif isinstance(lowercase_ , lowercase_ ): return {"path": None, "bytes": value} elif isinstance(lowercase_ , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(lowercase_ ) elif isinstance(lowercase_ , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(lowercase_ ) elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("""path""" )} elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )} else: raise ValueError( f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : dict , lowercase_ : List[str]=None ): if not self.decode: raise RuntimeError("""Decoding is disabled for this feature. 
Please use Image(decode=True) instead.""" ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support decoding images, please install 'Pillow'.""" ) if token_per_repo_id is None: lowercase_ : Union[str, Any] = {} lowercase_ , lowercase_ : List[Any] = value["""path"""], value["""bytes"""] if bytes_ is None: if path is None: raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) else: if is_local_path(lowercase_ ): lowercase_ : int = PIL.Image.open(lowercase_ ) else: lowercase_ : str = path.split("""::""" )[-1] try: lowercase_ : Any = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["""repo_id"""] lowercase_ : Optional[Any] = token_per_repo_id.get(lowercase_ ) except ValueError: lowercase_ : str = None with xopen(lowercase_ , """rb""" , use_auth_token=lowercase_ ) as f: lowercase_ : Dict = BytesIO(f.read() ) lowercase_ : Optional[Any] = PIL.Image.open(bytes_ ) else: lowercase_ : Any = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def SCREAMING_SNAKE_CASE_ ( self : int ): from .features import Value return ( self if self.decode else { "bytes": Value("""binary""" ), "path": Value("""string""" ), } ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ): if pa.types.is_string(storage.type ): lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.binary() ) lowercase_ : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Any = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("""bytes""" ) >= 0: lowercase_ : Optional[int] = storage.field("""bytes""" ) else: lowercase_ : Optional[Any] = pa.array([None] * len(lowercase_ ) , type=pa.binary() ) if storage.type.get_field_index("""path""" ) >= 0: lowercase_ : Dict = storage.field("""path""" ) else: lowercase_ : int = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): lowercase_ : Optional[int] = pa.array( [encode_np_array(np.array(lowercase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) lowercase_ : Tuple = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Tuple = pa.StructArray.from_arrays( [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() ) return array_cast(lowercase_ , self.pa_type ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(lowercase_ : Optional[Any] ): with xopen(lowercase_ , """rb""" ) as f: lowercase_ : int = f.read() return bytes_ lowercase_ : Optional[Any] = pa.array( [ (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) lowercase_ : Any = pa.array( [os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , ) lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] 
, mask=bytes_array.is_null() ) return array_cast(lowercase_ , self.pa_type ) def lowerCamelCase ( ) -> List[str]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() lowercase_ : int = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> bytes: lowercase_ : Tuple = BytesIO() if image.format in list_image_compression_formats(): lowercase_ : int = image.format else: lowercase_ : int = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF""" image.save(UpperCAmelCase__ , format=UpperCAmelCase__ ) return buffer.getvalue() def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> dict: if hasattr(UpperCAmelCase__ , """filename""" ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )} def lowerCamelCase ( UpperCAmelCase__ : np.ndarray ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) lowercase_ : List[Any] = array.dtype lowercase_ : int = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER lowercase_ : Dict = dtype.kind lowercase_ : List[Any] = dtype.itemsize lowercase_ : Any = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: lowercase_ : int = np.dtype("""|u1""" ) if dtype_kind not in ["u", "i"]: raise TypeError( F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' ) if dtype is not dest_dtype: warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: lowercase_ : str = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: lowercase_ : str = dtype_byteorder + dtype_kind + str(UpperCAmelCase__ ) lowercase_ : Optional[Any] = np.dtype(UpperCAmelCase__ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( F'''Cannot convert dtype {dtype} to a valid image dtype. 
Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' ) lowercase_ : Optional[int] = PIL.Image.fromarray(array.astype(UpperCAmelCase__ ) ) return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )} def lowerCamelCase ( UpperCAmelCase__ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if objs: lowercase_ , lowercase_ : Dict = first_non_null_value(UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(UpperCAmelCase__ , np.ndarray ): lowercase_ : Union[str, Any] = no_op_if_value_is_null(UpperCAmelCase__ ) return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs] elif isinstance(UpperCAmelCase__ , PIL.Image.Image ): lowercase_ : int = no_op_if_value_is_null(UpperCAmelCase__ ) return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs] else: return objs else: return objs
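# [Added illustration] The dtype-downcasting loop in encode_np_array above is the subtle
# part: Pillow only accepts a handful of numpy dtypes, so the array's dtype is shrunk
# within its kind until a valid one is found. A minimal standalone sketch of that idea —
# VALID_IMAGE_DTYPES is a hypothetical stand-in for the library's real table, and a
# little-endian host is assumed:
import numpy as np

VALID_IMAGE_DTYPES = [np.dtype("|u1"), np.dtype("<u2"), np.dtype("<i4"), np.dtype("<f4")]

def downcast_for_pillow(array: np.ndarray) -> np.dtype:
    """Shrink the itemsize within the same dtype kind until a compatible dtype is found."""
    dtype = array.dtype
    byteorder = dtype.byteorder if dtype.byteorder not in ("=", "|") else "<"  # assumes little-endian
    itemsize = dtype.itemsize
    while itemsize >= 1:
        candidate = np.dtype(byteorder + dtype.kind + str(itemsize))
        if candidate in VALID_IMAGE_DTYPES:
            return candidate
        itemsize //= 2  # try the next smaller width of the same kind
    raise TypeError(f"No valid image dtype for {dtype}")

assert downcast_for_pillow(np.zeros((4, 4), dtype=np.int64)) == np.dtype("int32")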
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


_lowercase : Any = logging.get_logger(__name__)

_lowercase : Optional[int] = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class __magic_name__ ( _UpperCAmelCase):
    UpperCamelCase__ = '''xlm-roberta'''

    def __init__( self : Any , lowercase_ : Dict=30522 , lowercase_ : Optional[Any]=768 , lowercase_ : Any=12 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=3072 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Optional[Any]=0.1 , lowercase_ : Dict=0.1 , lowercase_ : List[Any]=512 , lowercase_ : Union[str, Any]=2 , lowercase_ : Tuple=0.02 , lowercase_ : str=1E-12 , lowercase_ : Union[str, Any]=1 , lowercase_ : Union[str, Any]=0 , lowercase_ : str=2 , lowercase_ : Any="absolute" , lowercase_ : Union[str, Any]=True , lowercase_ : Union[str, Any]=None , **lowercase_ : List[Any] , ):
        super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )

        lowercase_ : int = vocab_size
        lowercase_ : List[Any] = hidden_size
        lowercase_ : Optional[int] = num_hidden_layers
        lowercase_ : Optional[Any] = num_attention_heads
        lowercase_ : str = hidden_act
        lowercase_ : List[Any] = intermediate_size
        lowercase_ : Tuple = hidden_dropout_prob
        lowercase_ : Any = attention_probs_dropout_prob
        lowercase_ : Any = max_position_embeddings
        lowercase_ : Any = type_vocab_size
        lowercase_ : Dict = initializer_range
        lowercase_ : Any = layer_norm_eps
        lowercase_ : Dict = position_embedding_type
        lowercase_ : str = use_cache
        lowercase_ : Dict = classifier_dropout


class __magic_name__ ( _UpperCAmelCase):
    @property
    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        if self.task == "multiple-choice":
            lowercase_ : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            lowercase_ : List[Any] = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ]
        )
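# [Added illustration] A minimal usage sketch for the config class above; the
# hyperparameter values are illustrative, not any particular checkpoint's.
from transformers import XLMRobertaConfig

config = XLMRobertaConfig(vocab_size=250002, hidden_size=768, num_hidden_layers=12)
print(config.model_type)  # "xlm-roberta"
print(config.vocab_size, config.hidden_size)  # 250002 768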
'''simple docstring'''

import colorsys

from PIL import Image  # type: ignore


def lowerCamelCase ( UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : int ) -> float:
    lowercase_ : List[Any] = x
    lowercase_ : Any = y
    for step in range(UpperCAmelCase__ ):  # noqa: B007
        lowercase_ : Dict = a * a - b * b + x
        lowercase_ : str = 2 * a * b + y
        lowercase_ : Optional[Any] = a_new

        # divergence is guaranteed for all complex numbers with an absolute value
        # greater than 2 (checked here as a * a + b * b > 4)
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(UpperCAmelCase__ , 1 , 1 ) )


def lowerCamelCase ( UpperCAmelCase__ : int = 800 , UpperCAmelCase__ : int = 600 , UpperCAmelCase__ : float = -0.6 , UpperCAmelCase__ : float = 0 , UpperCAmelCase__ : float = 3.2 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : bool = True , ) -> Image.Image:
    lowercase_ : Union[str, Any] = Image.new("""RGB""" , (image_width, image_height) )
    lowercase_ : Tuple = img.load()

    # loop through the image-coordinates
    for image_x in range(UpperCAmelCase__ ):
        for image_y in range(UpperCAmelCase__ ):
            # determine the figure-coordinates based on the image-coordinates
            lowercase_ : Any = figure_width / image_width * image_height
            lowercase_ : Tuple = figure_center_x + (image_x / image_width - 0.5) * figure_width
            lowercase_ : Union[str, Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height
            lowercase_ : str = get_distance(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                lowercase_ : List[Any] = get_color_coded_rgb(UpperCAmelCase__ )
            else:
                lowercase_ : Dict = get_black_and_white_rgb(UpperCAmelCase__ )

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    _lowercase : List[str] = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
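# [Added illustration] The escape-time routine above (the first function) uses the
# dataset's generated names; here is the same iteration rewritten with descriptive
# names for readability — same math, same normalization:
def escape_time(x: float, y: float, max_step: int) -> float:
    """Iterate z -> z**2 + c for c = x + y*i; return the normalized step of divergence."""
    a, b = x, y
    for step in range(max_step):  # noqa: B007
        a, b = a * a - b * b + x, 2 * a * b + y
        if a * a + b * b > 4:  # |z| > 2 guarantees divergence
            break
    return step / (max_step - 1)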
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() _lowercase : Optional[Any] = logging.get_logger(__name__) def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] ) -> Tuple: # initialize config if "resnet-50" in model_name: lowercase_ : Tuple = ResNetConfig.from_pretrained("""microsoft/resnet-50""" ) elif "resnet-101" in model_name: lowercase_ : str = ResNetConfig.from_pretrained("""microsoft/resnet-101""" ) else: raise ValueError("""Model name should include either resnet50 or resnet101""" ) lowercase_ : Optional[int] = DetrConfig(use_timm_backbone=UpperCAmelCase__ , backbone_config=UpperCAmelCase__ ) # set label attributes lowercase_ : Dict = """panoptic""" in model_name if is_panoptic: lowercase_ : Optional[int] = 250 else: lowercase_ : Optional[Any] = 91 lowercase_ : Dict = """huggingface/label-files""" lowercase_ : Tuple = """coco-detection-id2label.json""" lowercase_ : str = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ , repo_type="""dataset""" ) , """r""" ) ) lowercase_ : List[str] = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()} lowercase_ : List[str] = idalabel lowercase_ : Dict = {v: k for k, v in idalabel.items()} return config, is_panoptic def lowerCamelCase ( UpperCAmelCase__ : int ) -> Dict: # here we list all keys to be renamed (original name on the left, our name on the right) lowercase_ : List[Any] = [] # stem # fmt: off rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") ) rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") ) rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") ) rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") ) rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''', 
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''', ) ) # 3 convs for i in range(3 ): rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''', ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') ) 
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) return rename_keys def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int ) -> str: lowercase_ : Dict = state_dict.pop(UpperCAmelCase__ ) lowercase_ : List[Any] = val def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any]=False ) -> Dict: lowercase_ : Union[str, Any] = """""" if is_panoptic: lowercase_ : Any = """detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) lowercase_ : Any = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) lowercase_ : Tuple = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict lowercase_ : Any = in_proj_weight[:256, :] lowercase_ : Union[str, Any] = in_proj_bias[:256] lowercase_ : str = in_proj_weight[256:512, :] lowercase_ : str = in_proj_bias[256:512] lowercase_ : Any = in_proj_weight[-256:, :] lowercase_ : Optional[Any] = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention lowercase_ : str = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) lowercase_ : str = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and 
values (in that order) to the state dict lowercase_ : Tuple = in_proj_weight[:256, :] lowercase_ : List[Any] = in_proj_bias[:256] lowercase_ : str = in_proj_weight[256:512, :] lowercase_ : str = in_proj_bias[256:512] lowercase_ : List[Any] = in_proj_weight[-256:, :] lowercase_ : Optional[Any] = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention lowercase_ : Union[str, Any] = state_dict.pop( F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' ) lowercase_ : Dict = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) of cross-attention to the state dict lowercase_ : int = in_proj_weight_cross_attn[:256, :] lowercase_ : Union[str, Any] = in_proj_bias_cross_attn[:256] lowercase_ : Optional[Any] = in_proj_weight_cross_attn[256:512, :] lowercase_ : Optional[Any] = in_proj_bias_cross_attn[256:512] lowercase_ : str = in_proj_weight_cross_attn[-256:, :] lowercase_ : Union[str, Any] = in_proj_bias_cross_attn[-256:] def lowerCamelCase ( ) -> List[str]: lowercase_ : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowercase_ : Dict = Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__ ).raw ) return im @torch.no_grad() def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str=False ) -> Dict: lowercase_ , lowercase_ : str = get_detr_config(UpperCAmelCase__ ) # load original model from torch hub lowercase_ : Optional[int] = { """detr-resnet-50""": """detr_resnet50""", """detr-resnet-101""": """detr_resnet101""", } logger.info(F'''Converting model {model_name}...''' ) lowercase_ : Dict = torch.hub.load("""facebookresearch/detr""" , model_name_to_original_name[model_name] , pretrained=UpperCAmelCase__ ).eval() lowercase_ : List[str] = detr.state_dict() # rename keys for src, dest in create_rename_keys(UpperCAmelCase__ ): if is_panoptic: lowercase_ : Optional[int] = """detr.""" + src rename_key(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCAmelCase__ , is_panoptic=UpperCAmelCase__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them lowercase_ : List[str] = """detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): lowercase_ : List[Any] = state_dict.pop(UpperCAmelCase__ ) lowercase_ : Dict = val elif "class_labels_classifier" in key or "bbox_predictor" in key: lowercase_ : Tuple = state_dict.pop(UpperCAmelCase__ ) lowercase_ : List[str] = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: lowercase_ : int = state_dict.pop(UpperCAmelCase__ ) lowercase_ : Any = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): lowercase_ : List[Any] = state_dict.pop(UpperCAmelCase__ ) lowercase_ : Dict = val # finally, create HuggingFace model and load state dict lowercase_ : Optional[int] = DetrForSegmentation(UpperCAmelCase__ ) if is_panoptic else DetrForObjectDetection(UpperCAmelCase__ ) model.load_state_dict(UpperCAmelCase__ ) model.eval() # verify our conversion on an image lowercase_ : Tuple = """coco_panoptic""" if is_panoptic else 
"""coco_detection""" lowercase_ : Optional[int] = DetrImageProcessor(format=UpperCAmelCase__ ) lowercase_ : Dict = processor(images=prepare_img() , return_tensors="""pt""" ) lowercase_ : str = encoding["""pixel_values"""] lowercase_ : Optional[int] = detr(UpperCAmelCase__ ) lowercase_ : List[str] = model(UpperCAmelCase__ ) assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ ) model.save_pretrained(UpperCAmelCase__ ) processor.save_pretrained(UpperCAmelCase__ ) if push_to_hub: # Upload model and image processor to the hub logger.info("""Uploading PyTorch model and image processor to the hub...""" ) model.push_to_hub(F'''nielsr/{model_name}''' ) processor.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": _lowercase : Optional[int] = argparse.ArgumentParser() parser.add_argument( "--model_name", default="detr-resnet-50", type=str, choices=["detr-resnet-50", "detr-resnet-101"], help="Name of the DETR model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.") _lowercase : List[Any] = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''

from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class __magic_name__ ( _UpperCAmelCase):
    UpperCamelCase__ = DistilBertTokenizer
    UpperCamelCase__ = DistilBertTokenizerFast
    UpperCamelCase__ = True

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        lowercase_ : int = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )

        lowercase_ : str = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase_ )
        lowercase_ : Optional[int] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase_ )

        lowercase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowercase_ )
        lowercase_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ )

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
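# [Added illustration] The assertions above encode the BERT-style special-token templates
# that DistilBERT inherits: [CLS] A [SEP] for a single sequence and [CLS] A [SEP] B [SEP]
# for a pair. A standalone sketch of that logic:
def build_with_special_tokens(cls_id, sep_id, ids_a, ids_b=None):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]

assert build_with_special_tokens(101, 102, [7, 8]) == [101, 7, 8, 102]
assert build_with_special_tokens(101, 102, [7], [9]) == [101, 7, 102, 9, 102]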
'''simple docstring''' import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class __magic_name__ : def __init__( self : int , lowercase_ : int , lowercase_ : Optional[int]=13 , lowercase_ : str=7 , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[int]=True , lowercase_ : List[str]=True , lowercase_ : Optional[int]=True , lowercase_ : List[Any]=99 , lowercase_ : Any=64 , lowercase_ : List[Any]=32 , lowercase_ : int=5 , lowercase_ : Optional[Any]=4 , lowercase_ : int=37 , lowercase_ : Optional[int]="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : List[str]=512 , lowercase_ : Optional[int]=16 , lowercase_ : List[str]=2 , lowercase_ : Any=0.02 , lowercase_ : Dict=3 , lowercase_ : Optional[int]=4 , lowercase_ : Any=None , ): lowercase_ : Dict = parent lowercase_ : Any = batch_size lowercase_ : Dict = seq_length lowercase_ : Any = is_training lowercase_ : List[str] = use_input_mask lowercase_ : Tuple = use_token_type_ids lowercase_ : Union[str, Any] = use_labels lowercase_ : Any = vocab_size lowercase_ : List[str] = hidden_size lowercase_ : str = embedding_size lowercase_ : Optional[Any] = num_hidden_layers lowercase_ : Union[str, Any] = num_attention_heads lowercase_ : Any = intermediate_size lowercase_ : Optional[Any] = hidden_act lowercase_ : Dict = hidden_dropout_prob lowercase_ : Any = attention_probs_dropout_prob lowercase_ : int = max_position_embeddings lowercase_ : List[str] = type_vocab_size lowercase_ : List[Any] = type_sequence_label_size lowercase_ : Union[str, Any] = initializer_range lowercase_ : Optional[Any] = num_labels lowercase_ : Any = num_choices lowercase_ : str = scope def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase_ : Optional[Any] = None if self.use_input_mask: lowercase_ : str = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Optional[Any] = None if self.use_token_type_ids: lowercase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase_ : Optional[int] = None lowercase_ : List[Any] = None lowercase_ : List[str] = None if self.use_labels: lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowercase_ : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE_ ( self : Any ): return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Any , lowercase_ : int , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : int ): lowercase_ : Tuple = MegatronBertModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Optional[int] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ ) lowercase_ : str = model(lowercase_ , token_type_ids=lowercase_ ) lowercase_ : Dict = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : Any , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Dict ): lowercase_ : int = MegatronBertForMaskedLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : List[Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] ): lowercase_ : Any = MegatronBertForCausalLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : str = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Any ): lowercase_ : List[str] = MegatronBertForNextSentencePrediction(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Tuple = model( lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : List[str] ): lowercase_ : Union[str, Any] = MegatronBertForPreTraining(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : List[Any] = model( lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , next_sentence_label=lowercase_ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : str , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : 
Union[str, Any] , lowercase_ : Optional[int] ): lowercase_ : Dict = MegatronBertForQuestionAnswering(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Optional[Any] = model( lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] ): lowercase_ : Optional[Any] = self.num_labels lowercase_ : int = MegatronBertForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : List[Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Any , lowercase_ : Tuple ): lowercase_ : Dict = self.num_labels lowercase_ : str = MegatronBertForTokenClassification(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : str = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : str , lowercase_ : int , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : List[Any] ): lowercase_ : List[str] = self.num_choices lowercase_ : Any = MegatronBertForMultipleChoice(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase_ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase_ : Any = model( lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Optional[int] = config_and_inputs lowercase_ : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) UpperCamelCase__ = ( { '''feature-extraction''': MegatronBertModel, '''fill-mask''': MegatronBertForMaskedLM, '''question-answering''': MegatronBertForQuestionAnswering, '''text-classification''': 
MegatronBertForSequenceClassification, '''text-generation''': MegatronBertForCausalLM, '''token-classification''': MegatronBertForTokenClassification, '''zero-shot''': MegatronBertForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase__ = True # test_resize_embeddings = False UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Union[str, Any]=False ): lowercase_ : List[str] = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ ) if return_labels: if model_class in get_values(lowercase_ ): lowercase_ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowercase_ ) lowercase_ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_ ) return inputs_dict def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Optional[int] = MegatronBertModelTester(self ) lowercase_ : Optional[Any] = ConfigTester(self , config_class=lowercase_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowercase_ ) def lowerCamelCase ( UpperCAmelCase__ : Optional[int] ) -> Any: return torch.tensor( UpperCAmelCase__ , dtype=torch.long , device=UpperCAmelCase__ , ) _lowercase : List[Any] = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class __magic_name__ ( unittest.TestCase): @slow @unittest.skip("""Model is not available.""" ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : List[str] = """nvidia/megatron-bert-uncased-345m""" if "MYDIR" in os.environ: lowercase_ : str = os.path.join(os.environ["""MYDIR"""] , lowercase_ ) lowercase_ : Optional[int] = MegatronBertModel.from_pretrained(lowercase_ ) model.to(lowercase_ ) model.half() lowercase_ : List[str] = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] ) with torch.no_grad(): lowercase_ : str = 
model(lowercase_ )[0] lowercase_ : int = torch.Size((1, 9, 1024) ) self.assertEqual(output.shape , lowercase_ ) lowercase_ : Optional[int] = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28] for ii in range(3 ): for jj in range(3 ): lowercase_ : Dict = output[0, ii, jj] lowercase_ : Optional[Any] = expected[3 * ii + jj] lowercase_ : Optional[Any] = """ii={} jj={} a={} b={}""".format(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) self.assertTrue(math.isclose(lowercase_ , lowercase_ , rel_tol=lowercase_ , abs_tol=lowercase_ ) , msg=lowercase_ )
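# [Added illustration] The multiple-choice check above replicates each (batch, seq) input
# across a choice dimension with unsqueeze(1).expand(...).contiguous(). A standalone
# sketch of that reshape:
import torch

batch, seq_len, num_choices = 2, 7, 4
input_ids = torch.randint(0, 100, (batch, seq_len))
# (batch, seq) -> (batch, num_choices, seq): the same ids repeated per choice
choice_ids = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
assert choice_ids.shape == (batch, num_choices, seq_len)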
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_lowercase : Union[str, Any] = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : str = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    _lowercase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
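# [Added illustration] _LazyModule defers the heavy submodule imports above until an
# attribute is first accessed. A toy version of the idea — not transformers' actual
# implementation, just a sketch of the pattern:
import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_submodule:
            raise AttributeError(attr)
        # the submodule is imported only now, on first access
        submodule = importlib.import_module("." + self._name_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)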
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() _lowercase : int = logging.get_logger(__name__) def lowerCamelCase ( UpperCAmelCase__ : str ) -> YolosConfig: lowercase_ : Any = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: lowercase_ : Dict = 192 lowercase_ : List[Any] = 768 lowercase_ : Dict = 12 lowercase_ : List[str] = 3 lowercase_ : List[str] = [800, 1333] lowercase_ : Tuple = False elif yolos_name == "yolos_s_dWr": lowercase_ : str = 330 lowercase_ : Tuple = 14 lowercase_ : Optional[int] = 6 lowercase_ : List[Any] = 1320 elif "yolos_s" in yolos_name: lowercase_ : Any = 384 lowercase_ : List[Any] = 1536 lowercase_ : Union[str, Any] = 12 lowercase_ : int = 6 elif "yolos_b" in yolos_name: lowercase_ : List[Any] = [800, 1344] lowercase_ : int = 91 lowercase_ : str = """huggingface/label-files""" lowercase_ : List[str] = """coco-detection-id2label.json""" lowercase_ : List[str] = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ , repo_type="""dataset""" ) , """r""" ) ) lowercase_ : List[Any] = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()} lowercase_ : Optional[Any] = idalabel lowercase_ : List[Any] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( UpperCAmelCase__ : dict , UpperCAmelCase__ : YolosConfig , UpperCAmelCase__ : bool = False ) -> Any: for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase_ : Optional[int] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) lowercase_ : str = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowercase_ : Optional[Any] = in_proj_weight[: config.hidden_size, :] lowercase_ : Union[str, Any] = in_proj_bias[: config.hidden_size] lowercase_ : Dict = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase_ : Optional[int] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase_ : Tuple = in_proj_weight[-config.hidden_size :, :] lowercase_ : List[str] = in_proj_bias[-config.hidden_size :] def lowerCamelCase ( UpperCAmelCase__ : str ) -> str: if "backbone" in name: lowercase_ : Union[str, Any] = name.replace("""backbone""" , """vit""" ) if "cls_token" in name: lowercase_ : int = name.replace("""cls_token""" , """embeddings.cls_token""" ) if "det_token" in name: lowercase_ : Optional[int] = name.replace("""det_token""" , """embeddings.detection_tokens""" ) if "mid_pos_embed" in name: lowercase_ : List[str] = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" ) if "pos_embed" in name: lowercase_ : int = name.replace("""pos_embed""" , """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: lowercase_ : Union[str, Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "blocks" in name: lowercase_ : int = name.replace("""blocks""" , """encoder.layer""" ) if "attn.proj" in name: lowercase_ : Union[str, Any] = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: lowercase_ : Optional[int] = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: lowercase_ : Union[str, Any] = name.replace("""norm1""" , 
"""layernorm_before""" ) if "norm2" in name: lowercase_ : Any = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: lowercase_ : List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowercase_ : Any = name.replace("""mlp.fc2""" , """output.dense""" ) if "class_embed" in name: lowercase_ : int = name.replace("""class_embed""" , """class_labels_classifier""" ) if "bbox_embed" in name: lowercase_ : Optional[Any] = name.replace("""bbox_embed""" , """bbox_predictor""" ) if "vit.norm" in name: lowercase_ : Optional[int] = name.replace("""vit.norm""" , """vit.layernorm""" ) return name def lowerCamelCase ( UpperCAmelCase__ : dict , UpperCAmelCase__ : YolosForObjectDetection ) -> dict: for key in orig_state_dict.copy().keys(): lowercase_ : Optional[int] = orig_state_dict.pop(UpperCAmelCase__ ) if "qkv" in key: lowercase_ : str = key.split(""".""" ) lowercase_ : Union[str, Any] = int(key_split[2] ) lowercase_ : Union[str, Any] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: lowercase_ : Union[str, Any] = val[:dim, :] lowercase_ : Optional[Any] = val[ dim : dim * 2, : ] lowercase_ : Any = val[-dim:, :] else: lowercase_ : List[str] = val[:dim] lowercase_ : Any = val[dim : dim * 2] lowercase_ : Optional[Any] = val[-dim:] else: lowercase_ : Union[str, Any] = val return orig_state_dict def lowerCamelCase ( ) -> torch.Tensor: lowercase_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowercase_ : List[str] = Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__ ).raw ) return im @torch.no_grad() def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : bool = False ) -> Optional[Any]: lowercase_ : int = get_yolos_config(UpperCAmelCase__ ) # load original state_dict lowercase_ : List[str] = torch.load(UpperCAmelCase__ , map_location="""cpu""" )["""model"""] # load 🤗 model lowercase_ : str = YolosForObjectDetection(UpperCAmelCase__ ) model.eval() lowercase_ : Dict = convert_state_dict(UpperCAmelCase__ , UpperCAmelCase__ ) model.load_state_dict(UpperCAmelCase__ ) # Check outputs on an image, prepared by YolosImageProcessor lowercase_ : Union[str, Any] = 800 if yolos_name != """yolos_ti""" else 512 lowercase_ : Optional[int] = YolosImageProcessor(format="""coco_detection""" , size=UpperCAmelCase__ ) lowercase_ : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" ) lowercase_ : List[Any] = model(**UpperCAmelCase__ ) lowercase_ , lowercase_ : str = outputs.logits, outputs.pred_boxes lowercase_ , lowercase_ : Dict = None, None if yolos_name == "yolos_ti": lowercase_ : Dict = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) lowercase_ : Optional[Any] = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": lowercase_ : Any = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) lowercase_ : List[Any] = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": lowercase_ : List[str] = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) lowercase_ : Optional[int] = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) 
elif yolos_name == "yolos_s_dWr": lowercase_ : List[str] = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) lowercase_ : str = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": lowercase_ : Tuple = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) lowercase_ : str = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(F'''Unknown yolos_name: {yolos_name}''' ) assert torch.allclose(logits[0, :3, :3] , UpperCAmelCase__ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , UpperCAmelCase__ , atol=1e-4 ) Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ ) print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCAmelCase__ ) if push_to_hub: lowercase_ : Any = { """yolos_ti""": """yolos-tiny""", """yolos_s_200_pre""": """yolos-small""", """yolos_s_300_pre""": """yolos-small-300""", """yolos_s_dWr""": """yolos-small-dwr""", """yolos_base""": """yolos-base""", } print("""Pushing to the hub...""" ) lowercase_ : List[Any] = model_mapping[yolos_name] image_processor.push_to_hub(UpperCAmelCase__ , organization="""hustvl""" ) model.push_to_hub(UpperCAmelCase__ , organization="""hustvl""" ) if __name__ == "__main__": _lowercase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _lowercase : Tuple = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
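# [Added illustration] The rename helper above maps timm-style parameter names onto the
# HF layout through ordered substring substitutions; order matters because later rules
# see the output of earlier ones. A tiny sketch with two of the rules from above:
RENAMES = [("backbone", "vit"), ("patch_embed.proj", "embeddings.patch_embeddings.projection")]

def rename(name: str) -> str:
    for old, new in RENAMES:
        if old in name:
            name = name.replace(old, new)
    return name

assert rename("backbone.patch_embed.proj.weight") == "vit.embeddings.patch_embeddings.projection.weight"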
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_lowercase : Union[str, Any] = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Union[str, Any] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") _lowercase : Any = logging.getLogger(__name__) @dataclass class __magic_name__ : UpperCamelCase__ = field( default='''tab_fact''', metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''}) UpperCamelCase__ = field( default='''tab_fact''', metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''}, ) UpperCamelCase__ = field( default=1024, metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) }, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''Whether to pad all samples to `max_seq_length`. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch.''' ) }, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) }, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) }, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of prediction examples to this ''' '''value if set.''' ) }, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''A csv or a json file containing the training data.'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''A csv or a json file containing the validation data.'''}) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''A csv or a json file containing the test data.'''}) def SCREAMING_SNAKE_CASE_ ( self : Dict ): if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" ) else: lowercase_ : List[str] = self.train_file.split(""".""" )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." lowercase_ : Optional[int] = self.validation_file.split(""".""" )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class __magic_name__ : UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, ) UpperCamelCase__ = field( default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) }, ) def lowerCamelCase ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase_ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase_ , lowercase_ , lowercase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase_ , lowercase_ , lowercase_ : Any = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) lowercase_ : Tuple = training_args.get_process_log_level() logger.setLevel(UpperCAmelCase__ ) datasets.utils.logging.set_verbosity(UpperCAmelCase__ ) transformers.utils.logging.set_verbosity(UpperCAmelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. lowercase_ : str = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowercase_ : Tuple = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. lowercase_ : List[str] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. lowercase_ : List[str] = {"""train""": data_args.train_file, """validation""": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: lowercase_ : int = data_args.train_file.split(""".""" )[-1] lowercase_ : Any = data_args.test_file.split(""".""" )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." lowercase_ : List[str] = data_args.test_file else: raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" ) for key in data_files.keys(): logger.info(F'''load a local file for {key}: {data_files[key]}''' ) if data_args.train_file.endswith(""".csv""" ): # Loading a dataset from local csv files lowercase_ : List[str] = load_dataset("""csv""" , data_files=UpperCAmelCase__ , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files lowercase_ : Optional[Any] = load_dataset("""json""" , data_files=UpperCAmelCase__ , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels lowercase_ : List[Any] = raw_datasets["""train"""].features["""label"""].names lowercase_ : Dict = len(UpperCAmelCase__ ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
lowercase_ : Dict = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer lowercase_ : str = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=UpperCAmelCase__ , ) lowercase_ : List[str] = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: lowercase_ : Any = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch lowercase_ : int = False # Some models have set the order of the labels to use, so let's make sure we do use it. lowercase_ : Dict = {"""Refused""": 0, """Entailed""": 1} lowercase_ : Optional[int] = {0: """Refused""", 1: """Entailed"""} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the''' F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) lowercase_ : List[Any] = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(UpperCAmelCase__ : List[str] ): # Tokenize the texts def _convert_table_text_to_pandas(UpperCAmelCase__ : Dict ): lowercase_ : Optional[int] = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )] lowercase_ : str = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd lowercase_ : List[Any] = examples["""statement"""] lowercase_ : Dict = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) ) lowercase_ : Optional[int] = tokenizer(UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ ) lowercase_ : Union[str, Any] = examples["""label"""] return result with training_args.main_process_first(desc="""dataset map pre-processing""" ): lowercase_ : Optional[Any] = raw_datasets.map( UpperCAmelCase__ , batched=UpperCAmelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) lowercase_ : List[Any] = raw_datasets["""train"""] if data_args.max_train_samples is not None: lowercase_ : Union[str, Any] = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) lowercase_ : Union[str, Any] = raw_datasets["""validation"""] if data_args.max_eval_samples is not None: lowercase_ : str = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in 
raw_datasets and "test_matched" not in raw_datasets: raise ValueError("""--do_predict requires a test dataset""" ) lowercase_ : Optional[Any] = raw_datasets["""test"""] if data_args.max_predict_samples is not None: lowercase_ : Any = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(train_dataset ) ) , 3 ): logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(p : EvalPrediction ): preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions preds = np.argmax(preds , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: lowercase_ : Tuple = default_data_collator elif training_args.fp16: lowercase_ : int = DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 ) else: lowercase_ : Union[str, Any] = None # Initialize our Trainer lowercase_ : Any = Trainer( model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , ) # Training if training_args.do_train: lowercase_ : int = None if training_args.resume_from_checkpoint is not None: lowercase_ : str = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase_ : Tuple = last_checkpoint lowercase_ : Tuple = trainer.train(resume_from_checkpoint=UpperCAmelCase__ ) lowercase_ : Optional[Any] = train_result.metrics lowercase_ : int = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCAmelCase__ ) ) lowercase_ : int = min(UpperCAmelCase__ , len(UpperCAmelCase__ ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , UpperCAmelCase__ ) trainer.save_metrics("""train""" , UpperCAmelCase__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) lowercase_ : Union[str, Any] = trainer.evaluate(eval_dataset=UpperCAmelCase__ ) lowercase_ : Optional[int] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = min(UpperCAmelCase__ , len(UpperCAmelCase__ ) ) trainer.log_metrics("""eval""" , UpperCAmelCase__ ) trainer.save_metrics("""eval""" , UpperCAmelCase__ ) if training_args.do_predict: logger.info("""*** Predict ***""" ) # Removing the `label` columns because it contains -1 and Trainer won't like that.
lowercase_ : Optional[Any] = predict_dataset.remove_columns("""label""" ) lowercase_ : Any = trainer.predict(UpperCAmelCase__ , metric_key_prefix="""predict""" ).predictions lowercase_ : str = np.argmax(UpperCAmelCase__ , axis=1 ) lowercase_ : Union[str, Any] = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" ) if trainer.is_world_process_zero(): with open(UpperCAmelCase__ , """w""" ) as writer: logger.info("""***** Predict Results *****""" ) writer.write("""index\tprediction\n""" ) for index, item in enumerate(UpperCAmelCase__ ): lowercase_ : List[Any] = label_list[item] writer.write(F'''{index}\t{item}\n''' ) lowercase_ : Optional[int] = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""} if training_args.push_to_hub: trainer.push_to_hub(**UpperCAmelCase__ ) else: trainer.create_model_card(**UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : int ) -> List[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
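The `preprocess_tabfact_function` above relies on `_convert_table_text_to_pandas` to turn each TabFact table, serialized as a '#'-separated string, into a pandas DataFrame before tokenization. A minimal self-contained sketch of that conversion (the sample table string is invented for illustration):

import pandas as pd

# TabFact serializes a table with one row per line and '#'-separated cells;
# the first row carries the column headers. The sample string below is made up.
table_text = "year#city\n2008#beijing\n2012#london"
table_content = [row.split("#") for row in table_text.strip("\n").split("\n")]
table = pd.DataFrame.from_records(table_content[1:], columns=table_content[0])
print(table)
#    year     city
# 0  2008  beijing
# 1  2012   london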
code_codestyle: 21
'''simple docstring''' import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # Compare two TensorProtos while ignoring their names, restoring the names afterwards.
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    # Replace every input called `name` with `new_name`, recursing into subgraphs.
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Deduplicate identical initializers in an ONNX model and save an optimized copy."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # float32
                    mem_size *= 4
                elif dtype == 6:  # int32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # int64 / double
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
style_context_codestyle: 21
label: 1
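The deduplication script above hinges on comparing two ONNX initializers while ignoring their names: `_is_equal_tensor_proto` blanks both `name` fields, relies on protobuf message equality, then restores the names. A small sketch of that trick on synthetic tensors (the tensor values and names here are invented):

import numpy as np
from onnx import numpy_helper

a = numpy_helper.from_array(np.ones((2, 2), dtype=np.float32), name="w1")
b = numpy_helper.from_array(np.ones((2, 2), dtype=np.float32), name="w2")

name_a, name_b = a.name, b.name
a.name = ""
b.name = ""
print(a == b)  # True: identical tensor data once the names are ignored
a.name, b.name = name_a, name_b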
'''simple docstring''' import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml _lowercase : List[Any] = logging.get_logger(__name__) def lowerCamelCase ( UpperCAmelCase__ : bool , UpperCAmelCase__ : bool ) -> Optional[Any]: def run_func(UpperCAmelCase__ : Dict ): @wraps(UpperCAmelCase__ ) def run_in_eager_mode(*UpperCAmelCase__ : Any , **UpperCAmelCase__ : List[str] ): return func(*UpperCAmelCase__ , **UpperCAmelCase__ ) @wraps(UpperCAmelCase__ ) @tf.function(experimental_compile=UpperCAmelCase__ ) def run_in_graph_mode(*UpperCAmelCase__ : str , **UpperCAmelCase__ : str ): return func(*UpperCAmelCase__ , **UpperCAmelCase__ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> ["tf.Tensor"]: lowercase_ : str = random.Random() lowercase_ : int = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(UpperCAmelCase__ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = "TensorFlow" @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return tf.__version__ def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : str , lowercase_ : int , lowercase_ : int ): # initialize GPU on separate process lowercase_ : Union[str, Any] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowercase_ : Optional[int] = self._prepare_inference_func(lowercase_ , lowercase_ , lowercase_ ) return self._measure_speed(_inference ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str , lowercase_ : int , lowercase_ : int ): lowercase_ : int = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowercase_ : List[Any] = self._prepare_train_func(lowercase_ , lowercase_ , lowercase_ ) return self._measure_speed(_train ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : str , lowercase_ : int , lowercase_ : int ): # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowercase_ ) lowercase_ : str = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowercase_ : str = self._prepare_inference_func(lowercase_ , lowercase_ , lowercase_ ) return self._measure_memory(_inference ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str , lowercase_ : int , lowercase_ : int ): if self.args.is_gpu: 
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowercase_ ) lowercase_ : Tuple = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowercase_ : List[Any] = self._prepare_train_func(lowercase_ , lowercase_ , lowercase_ ) return self._measure_memory(_train ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : str , lowercase_ : int , lowercase_ : int ): lowercase_ : Any = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) lowercase_ : Optional[int] = ( hasattr(lowercase_ , """architectures""" ) and isinstance(config.architectures , lowercase_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: lowercase_ : int = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model lowercase_ : int = __import__("""transformers""" , fromlist=[model_class] ) lowercase_ : Optional[Any] = getattr(lowercase_ , lowercase_ ) lowercase_ : Any = model_cls(lowercase_ ) except ImportError: raise ImportError( f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: lowercase_ : Union[str, Any] = TF_MODEL_MAPPING[config.__class__](lowercase_ ) # encoder-decoder has vocab size saved differently lowercase_ : Tuple = config.vocab_size if hasattr(lowercase_ , """vocab_size""" ) else config.encoder.vocab_size lowercase_ : str = random_input_ids(lowercase_ , lowercase_ , lowercase_ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(lowercase_ , decoder_input_ids=lowercase_ , training=lowercase_ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(lowercase_ , training=lowercase_ ) lowercase_ : List[Any] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str , lowercase_ : int , lowercase_ : int ): lowercase_ : int = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) lowercase_ : str = ( hasattr(lowercase_ , """architectures""" ) and isinstance(config.architectures , lowercase_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: lowercase_ : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model lowercase_ : Dict = __import__("""transformers""" , fromlist=[model_class] ) lowercase_ : Optional[Any] = getattr(lowercase_ , lowercase_ ) lowercase_ : int = model_cls(lowercase_ ) except ImportError: raise ImportError( f'''{model_class} does not exist. 
If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: lowercase_ : Tuple = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowercase_ ) # encoder-decoder has vocab size saved differently lowercase_ : Optional[Any] = config.vocab_size if hasattr(lowercase_ , """vocab_size""" ) else config.encoder.vocab_size lowercase_ : str = random_input_ids(lowercase_ , lowercase_ , lowercase_ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): lowercase_ : Dict = model(lowercase_ , decoder_input_ids=lowercase_ , labels=lowercase_ , training=lowercase_ )[0] lowercase_ : Dict = tf.gradients(lowercase_ , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): lowercase_ : Optional[Any] = model(lowercase_ , labels=lowercase_ , training=lowercase_ )[0] lowercase_ : Any = tf.gradients(lowercase_ , model.trainable_variables ) return gradients lowercase_ : Union[str, Any] = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Optional[int] ): with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(lowercase_ , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average lowercase_ : Tuple = timeit.repeat( lowercase_ , repeat=self.args.repeat , number=10 , ) return min(lowercase_ ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Callable[[], None] ): logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) lowercase_ : int = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) lowercase_ : Dict = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() lowercase_ : Union[str, Any] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) lowercase_ : Optional[Any] = nvml.nvmlDeviceGetMemoryInfo(lowercase_ ) lowercase_ : Optional[Any] = meminfo.used lowercase_ : Optional[Any] = Memory(lowercase_ ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) lowercase_ : Any = None else: lowercase_ : Union[str, Any] = measure_peak_memory_cpu(lowercase_ ) lowercase_ : List[str] = Memory(lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else memory_bytes if self.args.trace_memory_line_by_line: lowercase_ : List[str] = stop_memory_tracing(lowercase_ ) if memory is None: lowercase_ : List[Any] = summary.total else: lowercase_ : Optional[Any] = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) return "N/A", None
code_codestyle: 21
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING _lowercase : str = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase) class __magic_name__ ( _UpperCAmelCase): def __init__( self : str , *lowercase_ : Dict , **lowercase_ : List[Any] ): super().__init__(*lowercase_ , **lowercase_ ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : List[Any]=None , lowercase_ : Dict=None ): lowercase_ : Optional[Any] = {} lowercase_ : Tuple = {} if prompt is not None: lowercase_ : Tuple = prompt if generate_kwargs is not None: lowercase_ : List[str] = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: lowercase_ : List[Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) lowercase_ : str = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : List[Any] , lowercase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase_ : Optional[int] ): return super().__call__(lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[Any] , lowercase_ : Tuple=None ): lowercase_ : List[Any] = load_image(lowercase_ ) if prompt is not None: if not isinstance(lowercase_ , lowercase_ ): raise ValueError( f'''Received an invalid text input, got - {type(lowercase_ )} - but expected a single string. 
''' """Note also that one single text can be provided for conditional image to text generation.""" ) lowercase_ : List[Any] = self.model.config.model_type if model_type == "git": lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework ) lowercase_ : Union[str, Any] = self.tokenizer(text=lowercase_ , add_special_tokens=lowercase_ ).input_ids lowercase_ : int = [self.tokenizer.cls_token_id] + input_ids lowercase_ : List[Any] = torch.tensor(lowercase_ ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": lowercase_ : Union[str, Any] = self.image_processor(images=lowercase_ , header_text=lowercase_ , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework ) lowercase_ : List[str] = self.tokenizer(lowercase_ , return_tensors=self.framework ) model_inputs.update(lowercase_ ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: lowercase_ : List[str] = self.image_processor(images=lowercase_ , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: lowercase_ : str = None return model_inputs def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Dict , lowercase_ : Optional[Any]=None ): # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , lowercase_ ) and all(x is None for x in model_inputs["""input_ids"""] ) ): lowercase_ : Any = None if generate_kwargs is None: lowercase_ : Optional[Any] = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. lowercase_ : Dict = model_inputs.pop(self.model.main_input_name ) lowercase_ : Any = self.model.generate(lowercase_ , **lowercase_ , **lowercase_ ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[Any] ): lowercase_ : List[str] = [] for output_ids in model_outputs: lowercase_ : Union[str, Any] = { """generated_text""": self.tokenizer.decode( lowercase_ , skip_special_tokens=lowercase_ , ) } records.append(lowercase_ ) return records
style_context_codestyle: 21
label: 1
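The benchmark's `_measure_speed` above times each callable with `timeit.repeat` and keeps the minimum rather than the mean, following the timeit documentation it cites. A standalone sketch of that pattern (the workload function is a stand-in, not a real model forward pass):

import timeit

def _inference():
    sum(range(10_000))  # stand-in workload

# Each repeat runs the callable 10 times; the minimum is the least noisy estimate.
runtimes = timeit.repeat(_inference, repeat=3, number=10)
print(min(runtimes) / 10.0)  # best-case seconds per single call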
'''simple docstring''' import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin _lowercase : int = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right _lowercase : Any = 256047 _lowercase : List[str] = 256145 @require_sentencepiece @require_tokenizers class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = NllbTokenizer UpperCamelCase__ = NllbTokenizerFast UpperCamelCase__ = True UpperCamelCase__ = True UpperCamelCase__ = {} def SCREAMING_SNAKE_CASE_ ( self : Any ): super().setUp() # We have a SentencePiece fixture for testing lowercase_ : Union[str, Any] = NllbTokenizer(lowercase_ , keep_accents=lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Dict = NllbTokenizer(lowercase_ , keep_accents=lowercase_ ) lowercase_ : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowercase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowercase_ : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowercase_ : Tuple = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual( lowercase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowercase_ : List[Any] = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Union[str, Any] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) lowercase_ : Optional[int] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) lowercase_ : Tuple = tempfile.mkdtemp() lowercase_ : List[Any] = tokenizer_r.save_pretrained(lowercase_ ) lowercase_ : Dict = tokenizer_p.save_pretrained(lowercase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in 
tokenizer_r_files ) ) lowercase_ : List[Any] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(lowercase_ , lowercase_ ) # Checks everything loads correctly in the same way lowercase_ : Dict = tokenizer_r.from_pretrained(lowercase_ ) lowercase_ : Union[str, Any] = tokenizer_p.from_pretrained(lowercase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase_ , lowercase_ ) ) shutil.rmtree(lowercase_ ) # Save tokenizer rust, legacy_format=True lowercase_ : List[Any] = tempfile.mkdtemp() lowercase_ : Tuple = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ ) lowercase_ : Union[str, Any] = tokenizer_p.save_pretrained(lowercase_ ) # Checks it save with the same files self.assertSequenceEqual(lowercase_ , lowercase_ ) # Checks everything loads correctly in the same way lowercase_ : Any = tokenizer_r.from_pretrained(lowercase_ ) lowercase_ : List[Any] = tokenizer_p.from_pretrained(lowercase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase_ , lowercase_ ) ) shutil.rmtree(lowercase_ ) # Save tokenizer rust, legacy_format=False lowercase_ : int = tempfile.mkdtemp() lowercase_ : Any = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ ) lowercase_ : Tuple = tokenizer_p.save_pretrained(lowercase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowercase_ : List[Any] = tokenizer_r.from_pretrained(lowercase_ ) lowercase_ : Any = tokenizer_p.from_pretrained(lowercase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase_ , lowercase_ ) ) shutil.rmtree(lowercase_ ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : str ): if not self.test_seqaseq: return lowercase_ : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Longer text that will definitely require truncation. 
lowercase_ : Any = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for""" """ Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons""" """ will only worsen the violence and misery for millions of people.""", ] lowercase_ : Any = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al""" """ Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi""" """ că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] try: lowercase_ : List[Any] = tokenizer.prepare_seqaseq_batch( src_texts=lowercase_ , tgt_texts=lowercase_ , max_length=3 , max_target_length=10 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 10 ) # max_target_length will default to max_length if not specified lowercase_ : int = tokenizer.prepare_seqaseq_batch( lowercase_ , tgt_texts=lowercase_ , max_length=3 , return_tensors="""pt""" ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) lowercase_ : List[Any] = tokenizer.prepare_seqaseq_batch( src_texts=lowercase_ , max_length=3 , max_target_length=10 , return_tensors="""pt""" ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn("""decoder_input_ids""" , lowercase_ ) @unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): pass def SCREAMING_SNAKE_CASE_ ( self : Any ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase_ : Dict = [AddedToken("""<special>""" , lstrip=lowercase_ )] lowercase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ ) lowercase_ : Tuple = tokenizer_r.encode("""Hey this is a <special> token""" ) lowercase_ : List[Any] = tokenizer_r.encode("""<special>""" , add_special_tokens=lowercase_ )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: lowercase_ : int = self.rust_tokenizer_class.from_pretrained( lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , ) lowercase_ : str = self.tokenizer_class.from_pretrained( lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ ) lowercase_ : Tuple = tokenizer_p.encode("""Hey this is a <special> token""" ) lowercase_ : int = tokenizer_cr.encode("""Hey this is a <special> token""" ) self.assertEqual(lowercase_ , lowercase_ ) self.assertEqual(lowercase_ , lowercase_ ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class __magic_name__ ( unittest.TestCase): UpperCamelCase__ = '''facebook/nllb-200-distilled-600M''' UpperCamelCase__ = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will 
only worsen the violence and misery for millions of people.''', ] UpperCamelCase__ = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei''' ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor''' ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] UpperCamelCase__ = [ 25_6047, 1_6297, 13_4408, 8165, 24_8066, 1_4734, 950, 1135, 10_5721, 3573, 83, 2_7352, 108, 4_9486, 2, ] @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] ): lowercase_ : NllbTokenizer = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" ) lowercase_ : Any = 1 return cls def SCREAMING_SNAKE_CASE_ ( self : Tuple ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 256001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 256002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 256057 ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): self.assertIn(lowercase_ , self.tokenizer.all_special_ids ) # fmt: off lowercase_ : List[str] = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047] # fmt: on lowercase_ : Optional[Any] = self.tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ ) lowercase_ : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase_ ) self.assertEqual(lowercase_ , lowercase_ ) self.assertNotIn(self.tokenizer.eos_token , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Tuple = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , lowercase_ ) lowercase_ : List[str] = 10 lowercase_ : Dict = self.tokenizer(lowercase_ , max_length=lowercase_ , truncation=lowercase_ ).input_ids[0] self.assertEqual(ids[-1] , 2 ) self.assertEqual(ids[0] , lowercase_ ) self.assertEqual(len(lowercase_ ) , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [256203, 3] ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Optional[Any] = tempfile.mkdtemp() lowercase_ : Any = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowercase_ ) lowercase_ : Tuple = NllbTokenizer.from_pretrained(lowercase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase_ ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Tuple = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) lowercase_ : Tuple = shift_tokens_right( batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""] ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual((2, 15) , batch.input_ids.shape ) self.assertEqual((2, 15) , batch.attention_mask.shape ) lowercase_ : Any = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowercase_ ) self.assertEqual(lowercase_ , batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset 
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : List[Any] = self.tokenizer(self.src_text , padding=lowercase_ , truncation=lowercase_ , max_length=3 , return_tensors="""pt""" ) lowercase_ : List[Any] = self.tokenizer( text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=10 , return_tensors="""pt""" ) lowercase_ : Optional[Any] = targets["""input_ids"""] lowercase_ : int = shift_tokens_right( lowercase_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : int = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" ) self.assertEqual( nested_simplify(lowercase_ ) , { # A, test, EOS, en_XX """input_ids""": [[256047, 70, 7356, 2]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 256057, } , ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Any = True lowercase_ : Optional[int] = self.tokenizer( """UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" ) self.assertEqual( inputs.input_ids , [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] ) lowercase_ : List[str] = False lowercase_ : Optional[Any] = self.tokenizer( """UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" ) self.assertEqual( inputs.input_ids , [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
code_codestyle: 21
'''simple docstring''' class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Union the sets containing src and dst by rank; return False if already joined."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        if self.parents[disj_set] == disj_set:
            return disj_set
        # Path compression: point directly at the root while resolving it.
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
style_context_codestyle: 21
label: 1
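A short usage sketch for the `DisjointSet` union-find above (`merge` unions by rank, `get_parent` resolves the representative with path compression, and `max_set` tracks the largest set size seen so far):

counts = DisjointSet([1, 1, 1, 1])  # four singleton sets of size 1
counts.merge(0, 1)
counts.merge(2, 3)
counts.merge(0, 2)
print(counts.max_set)  # 4: all four elements now share one set
print(counts.get_parent(3) == counts.get_parent(0))  # True: same representative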
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _lowercase : Optional[Any] = 16 _lowercase : str = 32 def lowerCamelCase ( UpperCAmelCase__ : Accelerator , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : str = "bert-base-cased" ) -> List[str]: lowercase_ : List[str] = AutoTokenizer.from_pretrained(UpperCAmelCase__ ) lowercase_ : List[Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(UpperCAmelCase__ : Tuple ): # max_length=None => use the model max length (it's actually the default) lowercase_ : Optional[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowercase_ : List[str] = datasets.map( UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=UpperCAmelCase__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase_ : Union[str, Any] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(UpperCAmelCase__ : Tuple ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(UpperCAmelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(UpperCAmelCase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. 
lowercase_ : Any = DataLoader( tokenized_datasets["""train"""] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ ) lowercase_ : int = DataLoader( tokenized_datasets["""validation"""] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ ) return train_dataloader, eval_dataloader def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] ) -> str: # Initialize accelerator lowercase_ : Union[str, Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase_ : Any = config["""lr"""] lowercase_ : str = int(config["""num_epochs"""] ) lowercase_ : Tuple = int(config["""seed"""] ) lowercase_ : Optional[Any] = int(config["""batch_size"""] ) lowercase_ : List[Any] = args.model_name_or_path set_seed(UpperCAmelCase__ ) lowercase_ , lowercase_ : Dict = get_dataloaders(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase_ : int = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase__ , return_dict=UpperCAmelCase__ ) # Instantiate optimizer lowercase_ : Dict = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowercase_ : List[Any] = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase__ ) if accelerator.state.deepspeed_plugin is not None: lowercase_ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: lowercase_ : int = 1 lowercase_ : Union[str, Any] = (len(UpperCAmelCase__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowercase_ : List[str] = get_linear_schedule_with_warmup( optimizer=UpperCAmelCase__ , num_warmup_steps=0 , num_training_steps=UpperCAmelCase__ , ) else: lowercase_ : Optional[int] = DummyScheduler(UpperCAmelCase__ , total_num_steps=UpperCAmelCase__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = accelerator.prepare( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # We need to keep track of how many total steps we have iterated over lowercase_ : str = 0 # We also need to keep track of the stating epoch so files are named properly lowercase_ : str = 0 # Now we train the model lowercase_ : int = evaluate.load("""glue""" , """mrpc""" ) lowercase_ : Tuple = 0 lowercase_ : Tuple = {} for epoch in range(UpperCAmelCase__ , UpperCAmelCase__ ): model.train() for step, batch in enumerate(UpperCAmelCase__ ): lowercase_ : Optional[Any] = model(**UpperCAmelCase__ ) lowercase_ : int = outputs.loss lowercase_ : List[str] = loss / gradient_accumulation_steps accelerator.backward(UpperCAmelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() lowercase_ : Tuple = 0 for step, batch in enumerate(UpperCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): lowercase_ : Tuple = model(**UpperCAmelCase__ ) lowercase_ : int = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times lowercase_ , lowercase_ : Dict = accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(UpperCAmelCase__ ) - 1: lowercase_ : List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen] lowercase_ : List[str] = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=UpperCAmelCase__ , references=UpperCAmelCase__ , ) lowercase_ : int = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase__ ) lowercase_ : Tuple = eval_metric["""accuracy"""] if best_performance < eval_metric["accuracy"]: lowercase_ : List[Any] = eval_metric["""accuracy"""] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f: json.dump(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( ) -> int: lowercase_ : int = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=UpperCAmelCase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=UpperCAmelCase__ , ) parser.add_argument( """--output_dir""" , type=UpperCAmelCase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--performance_lower_bound""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , ) parser.add_argument( """--num_epochs""" , type=UpperCAmelCase__ , default=3 , help="""Number of train epochs.""" , ) lowercase_ : Union[str, Any] = parser.parse_args() lowercase_ : int = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(UpperCAmelCase__ , UpperCAmelCase__ ) if __name__ == "__main__": main()
code_codestyle: 21
'''simple docstring''' from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING _lowercase : str = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase) class __magic_name__ ( _UpperCAmelCase): def __init__( self : str , *lowercase_ : int , **lowercase_ : Any ): super().__init__(*lowercase_ , **lowercase_ ) requires_backends(self , """decord""" ) self.check_model_type(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : Union[str, Any]=None , lowercase_ : List[Any]=None ): lowercase_ : Union[str, Any] = {} if frame_sampling_rate is not None: lowercase_ : Any = frame_sampling_rate if num_frames is not None: lowercase_ : Optional[Any] = num_frames lowercase_ : Union[str, Any] = {} if top_k is not None: lowercase_ : Optional[Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self : str , lowercase_ : Union[str, List[str]] , **lowercase_ : str ): return super().__call__(lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=None , lowercase_ : Optional[int]=1 ): if num_frames is None: lowercase_ : List[Any] = self.model.config.num_frames if video.startswith("""http://""" ) or video.startswith("""https://""" ): lowercase_ : Union[str, Any] = BytesIO(requests.get(lowercase_ ).content ) lowercase_ : Optional[Any] = VideoReader(lowercase_ ) videoreader.seek(0 ) lowercase_ : Tuple = 0 lowercase_ : List[Any] = num_frames * frame_sampling_rate - 1 lowercase_ : Optional[int] = np.linspace(lowercase_ , lowercase_ , num=lowercase_ , dtype=np.intaa ) lowercase_ : Optional[int] = videoreader.get_batch(lowercase_ ).asnumpy() lowercase_ : Union[str, Any] = list(lowercase_ ) lowercase_ : Optional[Any] = self.image_processor(lowercase_ , return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : str ): lowercase_ : int = self.model(**lowercase_ ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[Any] , lowercase_ : Dict=5 ): if top_k > self.model.config.num_labels: lowercase_ : List[Any] = self.model.config.num_labels if self.framework == "pt": lowercase_ : str = model_outputs.logits.softmax(-1 )[0] lowercase_ , lowercase_ : Optional[Any] = probs.topk(lowercase_ ) else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) lowercase_ : Union[str, Any] = scores.tolist() lowercase_ : Tuple = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
style_context_codestyle: 21
label: 1
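The video pipeline's `preprocess` above picks `num_frames` evenly spaced frame indices spanning a window of `num_frames * frame_sampling_rate` frames before decoding them with decord. A minimal sketch of that index computation (the concrete numbers are illustrative):

import numpy as np

num_frames, frame_sampling_rate = 8, 4
start_idx = 0
end_idx = num_frames * frame_sampling_rate - 1
indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
print(indices)  # [ 0  4  8 13 17 22 26 31]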
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
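A short usage sketch for the tokenizer above; the checkpoint is the one named in PRETRAINED_VOCAB_FILES_MAP, and the id values follow the fairseq alignment table in __init__ (a sketch, not a test):

from transformers import XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

# The first four ids mimic fairseq ("<s>"=0, "<pad>"=1, "</s>"=2, "<unk>"=3);
# every other piece is its sentencepiece id shifted by fairseq_offset (= 1).
assert tokenizer.convert_tokens_to_ids("<pad>") == 1

# A single sequence is encoded as `</s> + tokens` with no trailing eos,
# matching build_inputs_with_special_tokens above.
ids = tokenizer("Hello world").input_ids
print(tokenizer.convert_ids_to_tokens(ids))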
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def lowerCamelCase ( UpperCAmelCase__ : Optional[int] ) -> List[str]: if isinstance(UpperCAmelCase__ , collections.abc.Iterable ): return x return (x, x) @require_flax class __magic_name__ : def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any , lowercase_ : str ): pass def SCREAMING_SNAKE_CASE_ ( self : str ): pass def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): pass def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float ): lowercase_ : Optional[Any] = np.abs((a - b) ).max() self.assertLessEqual(lowercase_ , lowercase_ , f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Tuple=None , **lowercase_ : Optional[int] ): lowercase_ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : Any = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : List[Any]=None , **lowercase_ : Tuple ): lowercase_ , lowercase_ : Any = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Optional[int] = {"""vision_model""": vision_model, """text_model""": text_model} lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=None , **lowercase_ : int ): lowercase_ , lowercase_ : Union[str, Any] = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Optional[Any] = {"""vision_model""": vision_model, 
"""text_model""": text_model} lowercase_ : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : Tuple = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) lowercase_ : Any = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ ) lowercase_ : List[str] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) lowercase_ : Union[str, Any] = after_output[0] lowercase_ : str = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Dict=None , **lowercase_ : Optional[Any] ): lowercase_ , lowercase_ : Optional[int] = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Dict = {"""vision_model""": vision_model, """text_model""": text_model} lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : Optional[int] = model( input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ ) lowercase_ : Tuple = output.vision_model_output.attentions self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowercase_ : List[str] = to_atuple(vision_model.config.image_size ) lowercase_ : Optional[Any] = to_atuple(vision_model.config.patch_size ) lowercase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowercase_ : Optional[Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowercase_ : Union[str, Any] = output.text_model_output.attentions self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int ): pt_model.to(lowercase_ ) pt_model.eval() # prepare inputs lowercase_ : int = inputs_dict lowercase_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): lowercase_ : str = pt_model(**lowercase_ ).to_tuple() lowercase_ : Optional[Any] = fx_model(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowercase_ ) lowercase_ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ , from_pt=lowercase_ ) lowercase_ : Dict = fx_model_loaded(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowercase_ ) lowercase_ : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(lowercase_ , from_flax=lowercase_ 
) pt_model_loaded.to(lowercase_ ) pt_model_loaded.eval() with torch.no_grad(): lowercase_ : List[Any] = pt_model_loaded(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(lowercase_ , pt_output_loaded.numpy() , 4E-2 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Union[str, Any] ): lowercase_ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : List[Any] = VisionTextDualEncoderModel(lowercase_ ) lowercase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_ ) lowercase_ : Tuple = fx_state self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] ): lowercase_ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : int = VisionTextDualEncoderModel(lowercase_ ) lowercase_ : Dict = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : Optional[Any] = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params ) self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Tuple = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = self.prepare_config_and_inputs() self.check_save_load(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowercase_ ) @is_pt_flax_cross_test def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = self.prepare_config_and_inputs() lowercase_ : List[Any] = config_inputs_dict.pop("""vision_config""" ) lowercase_ : int = config_inputs_dict.pop("""text_config""" ) lowercase_ : Optional[int] = config_inputs_dict self.check_equivalence_pt_to_flax(lowercase_ , lowercase_ , lowercase_ ) self.check_equivalence_flax_to_pt(lowercase_ , lowercase_ , lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ , lowercase_ : str = self.get_pretrained_model_and_inputs() lowercase_ : Dict = model_a(**lowercase_ ) lowercase_ : str = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ ) lowercase_ : str = model_a(**lowercase_ ) lowercase_ : Union[str, Any] = after_outputs[0] lowercase_ : Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-5 ) @require_flax class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , ) lowercase_ : List[str] = 13 lowercase_ : Optional[Any] = floats_tensor( [ batch_size, 
model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowercase_ : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowercase_ : str = random_attention_mask([batch_size, 4] ) lowercase_ : List[str] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Tuple ): lowercase_ : Union[str, Any] = FlaxViTModel(lowercase_ ) lowercase_ : Dict = FlaxBertModel(lowercase_ ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Any = FlaxViTModelTester(self ) lowercase_ : Optional[Any] = FlaxBertModelTester(self ) lowercase_ : Dict = vit_model_tester.prepare_config_and_inputs() lowercase_ : Optional[Any] = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ : List[str] = vision_config_and_inputs lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , ) lowercase_ : List[str] = 13 lowercase_ : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowercase_ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowercase_ : Tuple = random_attention_mask([batch_size, 4] ) lowercase_ : Union[str, Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ): lowercase_ : Tuple = FlaxCLIPVisionModel(lowercase_ ) lowercase_ : Any = FlaxBertModel(lowercase_ ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Union[str, Any] = FlaxCLIPVisionModelTester(self ) lowercase_ : Tuple = FlaxBertModelTester(self ) lowercase_ : Union[str, Any] = clip_model_tester.prepare_config_and_inputs() lowercase_ : Any = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ : Optional[Any] = vision_config_and_inputs lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 ) lowercase_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) lowercase_ : Union[str, Any] = 
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) lowercase_ : Optional[int] = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=lowercase_ , padding=lowercase_ , return_tensors="""np""" ) lowercase_ : List[str] = model(**lowercase_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) lowercase_ : Optional[Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] ) self.assertTrue(np.allclose(outputs.logits_per_image , lowercase_ , atol=1E-3 ) )
'''simple docstring'''
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
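The recursive helper above can hit Python's default recursion limit on deep graphs; a minimal iterative sketch of the same traversal using an explicit stack (a standalone function, not part of the class above):

def dfs_iterative(graph: dict, start_vertex: int) -> None:
    """Visit vertices in DFS order using an explicit stack instead of recursion."""
    visited = set()
    stack = [start_vertex]
    while stack:
        vertex = stack.pop()
        if vertex in visited:
            continue
        visited.add(vertex)
        print(vertex, end=" ")
        # push neighbours in reverse so the first neighbour is expanded first
        for neighbour in reversed(graph.get(vertex, [])):
            if neighbour not in visited:
                stack.append(neighbour)


# dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0) prints: 0 1 2 3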
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class __magic_name__ ( unittest.TestCase): def __init__( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : int=7 , lowercase_ : Optional[Any]=3 , lowercase_ : Optional[Any]=18 , lowercase_ : List[Any]=30 , lowercase_ : int=400 , lowercase_ : Dict=True , lowercase_ : List[Any]=None , lowercase_ : Dict=True , ): lowercase_ : Tuple = size if size is not None else {"""height""": 18, """width""": 18} lowercase_ : List[str] = parent lowercase_ : Any = batch_size lowercase_ : Optional[Any] = num_channels lowercase_ : Tuple = image_size lowercase_ : Optional[Any] = min_resolution lowercase_ : Dict = max_resolution lowercase_ : Optional[int] = do_resize lowercase_ : Optional[Any] = size lowercase_ : Union[str, Any] = do_normalize def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = ImageGPTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = ImageGPTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase_ , """clusters""" ) ) self.assertTrue(hasattr(lowercase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowercase_ , """size""" ) ) self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) lowercase_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : int = self.image_processing_class(**self.image_processor_dict ) lowercase_ : Union[str, Any] = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , obj[key] ) ) else: self.assertEqual(obj[key] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : str = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase_ : Union[str, Any] = os.path.join(lowercase_ , """image_processor.json""" ) image_processor_first.to_json_file(lowercase_ ) lowercase_ : Optional[Any] = self.image_processing_class.from_json_file(lowercase_ 
).to_dict() lowercase_ : Any = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(lowercase_ ) lowercase_ : Any = self.image_processing_class.from_pretrained(lowercase_ ).to_dict() lowercase_ : List[str] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowercase_ ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def SCREAMING_SNAKE_CASE_ ( self : Any ): pass def lowerCamelCase ( ) -> Any: lowercase_ : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" ) lowercase_ : Any = Image.open(dataset[4]["""file"""] ) lowercase_ : Dict = Image.open(dataset[5]["""file"""] ) lowercase_ : int = [imagea, imagea] return images @require_vision @require_torch class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Optional[Any] = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) lowercase_ : Optional[int] = prepare_images() # test non-batched lowercase_ : str = image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1024) ) lowercase_ : Tuple = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase_ ) # test batched lowercase_ : List[str] = image_processing(lowercase_ , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1024) ) lowercase_ : Union[str, Any] = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase_ )
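The clusters configured above act as a color palette: ImageGPT quantizes each normalized pixel to the index of its nearest cluster, which is what produces the integer `input_ids` checked in the integration test. A small sketch of that nearest-neighbour assignment (this mirrors the idea, not the library's exact internal helper):

import numpy as np


def color_quantize(pixels: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    """Map each pixel of shape (N, 3) to the index of its nearest cluster (K, 3)."""
    # squared euclidean distance between every pixel and every cluster
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    return distances.argmin(axis=1)


pixels = np.random.uniform(-1, 1, size=(4, 3))
clusters = np.asarray(
    [
        [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
        [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
    ]
)
print(color_quantize(pixels, clusters))  # e.g. [0 1 1 0]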
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
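A short usage sketch showing why `copy` deep-copies every field (so a per-call variant never mutates the shared base config):

base_config = DownloadConfig(max_retries=3, resume_download=True)

per_call_config = base_config.copy()
per_call_config.force_download = True
per_call_config.proxies = {"https": "http://localhost:3128"}  # placeholder proxy

# the deep copy leaves the original untouched
assert base_config.force_download is False
assert base_config.proxies is None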
'''simple docstring'''
def solution() -> int:
    """Return the product d1 * d10 * d100 * ... * d1000000 of the digits of the
    Champernowne constant 0.123456789101112... (Project Euler problem 40)."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
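Building the million-character string is simple but memory-hungry; a sketch of a constant-memory alternative that computes the digit at position n directly from block arithmetic (a standalone helper, not part of the solution above):

from functools import reduce
from operator import mul


def champernowne_digit(n: int) -> int:
    """Return the n-th digit (1-indexed) of 0.123456789101112... without
    materialising the whole string."""
    digits = 1  # digits per number in the current block (1-9, 10-99, ...)
    count = 9   # how many numbers the current block contains
    start = 1   # first number of the current block
    while n > digits * count:
        n -= digits * count
        digits += 1
        count *= 10
        start *= 10
    number = start + (n - 1) // digits
    return int(str(number)[(n - 1) % digits])


# same product as the string-based solution above (d1 * d10 * ... * d1000000)
print(reduce(mul, (champernowne_digit(10**k) for k in range(7))))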
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class __magic_name__ ( unittest.TestCase): UpperCamelCase__ = MODEL_FOR_CAUSAL_LM_MAPPING UpperCamelCase__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : List[str] = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" ) # Using `do_sample=False` to force deterministic output lowercase_ : Union[str, Any] = text_generator("""This is a test""" , do_sample=lowercase_ ) self.assertEqual( lowercase_ , [ { """generated_text""": ( """This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.""" """ oscope. FiliFili@@""" ) } ] , ) lowercase_ : Optional[Any] = text_generator(["""This is a test""", """This is a second test"""] ) self.assertEqual( lowercase_ , [ [ { """generated_text""": ( """This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.""" """ oscope. FiliFili@@""" ) } ], [ { """generated_text""": ( """This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy""" """ oscope. oscope. FiliFili@@""" ) } ], ] , ) lowercase_ : Dict = text_generator("""This is a test""" , do_sample=lowercase_ , num_return_sequences=2 , return_tensors=lowercase_ ) self.assertEqual( lowercase_ , [ {"""generated_token_ids""": ANY(lowercase_ )}, {"""generated_token_ids""": ANY(lowercase_ )}, ] , ) lowercase_ : Any = text_generator.model.config.eos_token_id lowercase_ : List[Any] = """<pad>""" lowercase_ : Dict = text_generator( ["""This is a test""", """This is a second test"""] , do_sample=lowercase_ , num_return_sequences=2 , batch_size=2 , return_tensors=lowercase_ , ) self.assertEqual( lowercase_ , [ [ {"""generated_token_ids""": ANY(lowercase_ )}, {"""generated_token_ids""": ANY(lowercase_ )}, ], [ {"""generated_token_ids""": ANY(lowercase_ )}, {"""generated_token_ids""": ANY(lowercase_ )}, ], ] , ) @require_tf def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : List[str] = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" ) # Using `do_sample=False` to force deterministic output lowercase_ : Tuple = text_generator("""This is a test""" , do_sample=lowercase_ ) self.assertEqual( lowercase_ , [ { """generated_text""": ( """This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵""" """ please,""" ) } ] , ) lowercase_ : Optional[Any] = text_generator(["""This is a test""", """This is a second test"""] , do_sample=lowercase_ ) self.assertEqual( lowercase_ , [ [ { """generated_text""": ( """This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵""" """ please,""" ) } ], [ { """generated_text""": ( """This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes""" """ Cannes 閲閲Cannes Cannes Cannes 攵 please,""" ) } ], ] , ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : int ): lowercase_ : Tuple = TextGenerationPipeline(model=lowercase_ , tokenizer=lowercase_ ) return text_generator, ["This is a test", "Another test"] def 
SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Optional[int] = """Hello I believe in""" lowercase_ : Union[str, Any] = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" ) lowercase_ : Union[str, Any] = text_generator(lowercase_ ) self.assertEqual( lowercase_ , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , ) lowercase_ : int = text_generator(lowercase_ , stop_sequence=""" fe""" ) self.assertEqual(lowercase_ , [{"""generated_text""": """Hello I believe in fe"""}] ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Any , lowercase_ : Dict ): lowercase_ : Any = text_generator.model lowercase_ : Tuple = text_generator.tokenizer lowercase_ : str = text_generator("""This is a test""" ) self.assertEqual(lowercase_ , [{"""generated_text""": ANY(lowercase_ )}] ) self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) ) lowercase_ : str = text_generator("""This is a test""" , return_full_text=lowercase_ ) self.assertEqual(lowercase_ , [{"""generated_text""": ANY(lowercase_ )}] ) self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] ) lowercase_ : Tuple = pipeline(task="""text-generation""" , model=lowercase_ , tokenizer=lowercase_ , return_full_text=lowercase_ ) lowercase_ : Tuple = text_generator("""This is a test""" ) self.assertEqual(lowercase_ , [{"""generated_text""": ANY(lowercase_ )}] ) self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] ) lowercase_ : Optional[Any] = text_generator("""This is a test""" , return_full_text=lowercase_ ) self.assertEqual(lowercase_ , [{"""generated_text""": ANY(lowercase_ )}] ) self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) ) lowercase_ : int = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=lowercase_ ) self.assertEqual( lowercase_ , [ [{"""generated_text""": ANY(lowercase_ )}, {"""generated_text""": ANY(lowercase_ )}], [{"""generated_text""": ANY(lowercase_ )}, {"""generated_text""": ANY(lowercase_ )}], ] , ) if text_generator.tokenizer.pad_token is not None: lowercase_ : Optional[Any] = text_generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=lowercase_ ) self.assertEqual( lowercase_ , [ [{"""generated_text""": ANY(lowercase_ )}, {"""generated_text""": ANY(lowercase_ )}], [{"""generated_text""": ANY(lowercase_ )}, {"""generated_text""": ANY(lowercase_ )}], ] , ) with self.assertRaises(lowercase_ ): lowercase_ : Dict = text_generator("""test""" , return_full_text=lowercase_ , return_text=lowercase_ ) with self.assertRaises(lowercase_ ): lowercase_ : Union[str, Any] = text_generator("""test""" , return_full_text=lowercase_ , return_tensors=lowercase_ ) with self.assertRaises(lowercase_ ): lowercase_ : Optional[int] = text_generator("""test""" , return_text=lowercase_ , return_tensors=lowercase_ ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. 
if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowercase_ : Tuple = text_generator("""""" ) self.assertEqual(lowercase_ , [{"""generated_text""": ANY(lowercase_ )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowercase_ : List[Any] = text_generator("""""" ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. lowercase_ : Tuple = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""] if ( tokenizer.model_max_length < 10000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator("""This is a test""" * 500 , max_new_tokens=20 ) lowercase_ : Union[str, Any] = text_generator("""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(lowercase_ ): text_generator( """This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self : Any ): import torch # Classic `model_kwargs` lowercase_ : Any = pipeline( model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowercase_ : int = pipe("""This is a test""" ) self.assertEqual( lowercase_ , [ { """generated_text""": ( """This is a test test test test test test test test test test test test test test test test""" """ test""" ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) 
lowercase_ : Tuple = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowercase_ : Tuple = pipe("""This is a test""" ) self.assertEqual( lowercase_ , [ { """generated_text""": ( """This is a test test test test test test test test test test test test test test test test""" """ test""" ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowercase_ : Any = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) lowercase_ : Optional[Any] = pipe("""This is a test""" ) self.assertEqual( lowercase_ , [ { """generated_text""": ( """This is a test test test test test test test test test test test test test test test test""" """ test""" ) } ] , ) @require_torch @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): import torch lowercase_ : Optional[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa ) pipe("""This is a test""" ) @require_torch @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): import torch lowercase_ : Any = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa ) pipe("""This is a test""" , do_sample=lowercase_ , top_p=0.5 ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Union[str, Any] = """Hello world""" lowercase_ : Any = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" ) if text_generator.model.framework == "tf": lowercase_ : int = logging.get_logger("""transformers.generation.tf_utils""" ) else: lowercase_ : Union[str, Any] = logging.get_logger("""transformers.generation.utils""" ) lowercase_ : List[Any] = """Both `max_new_tokens`""" # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(lowercase_ ) as cl: lowercase_ : List[Any] = text_generator(lowercase_ , max_length=10 , max_new_tokens=1 ) self.assertIn(lowercase_ , cl.out ) # The user only sets one -> no warning with CaptureLogger(lowercase_ ) as cl: lowercase_ : List[str] = text_generator(lowercase_ , max_new_tokens=1 ) self.assertNotIn(lowercase_ , cl.out ) with CaptureLogger(lowercase_ ) as cl: lowercase_ : Optional[Any] = text_generator(lowercase_ , max_length=10 ) self.assertNotIn(lowercase_ , cl.out )
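The `handle_long_generation="hole"` strategy exercised above truncates the prompt from the left so that prompt plus new tokens still fits the model window; a minimal sketch of the idea (hypothetical numbers, not the pipeline's exact code):

def apply_hole_strategy(input_ids: list, model_max_length: int, max_new_tokens: int) -> list:
    """Keep only the most recent prompt tokens so generation has room for `max_new_tokens`."""
    keep = model_max_length - max_new_tokens
    if keep <= 0:
        raise ValueError("max_new_tokens does not leave room for any prompt tokens")
    return input_ids[-keep:]


prompt = list(range(1200))  # pretend token ids
trimmed = apply_hole_strategy(prompt, model_max_length=1024, max_new_tokens=20)
assert len(trimmed) == 1004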
'''simple docstring'''
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
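A minimal sketch of calling the pipeline above; the checkpoint name is an assumption (any unconditional audio diffusion checkpoint that uses this pipeline class should work):

import torch
from diffusers import DanceDiffusionPipeline

# assumed checkpoint; swap in any compatible one
pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# audio_length_in_s is rounded up to a multiple of the UNet's downscale factor,
# then trimmed back to the requested length after denoising (see above).
output = pipe(num_inference_steps=100, audio_length_in_s=4.0)
audio = output.audios[0]  # numpy array of shape (channels, samples)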
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = RobertaTokenizer UpperCamelCase__ = RobertaTokenizerFast UpperCamelCase__ = True UpperCamelCase__ = {'''cls_token''': '''<s>'''} def SCREAMING_SNAKE_CASE_ ( self : List[str] ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase_ : List[str] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] lowercase_ : Tuple = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) ) lowercase_ : int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] lowercase_ : Dict = {"""unk_token""": """<unk>"""} lowercase_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowercase_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(lowercase_ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(lowercase_ ) ) def SCREAMING_SNAKE_CASE_ ( self : str , **lowercase_ : int ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , **lowercase_ : int ): kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Optional[Any] ): lowercase_ : Dict = """lower newer""" lowercase_ : int = """lower newer""" return input_text, output_text def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Dict = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase_ : Tuple = """lower newer""" lowercase_ : Dict = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] lowercase_ : List[str] = tokenizer.tokenize(lowercase_ ) # , add_prefix_space=True) self.assertListEqual(lowercase_ , lowercase_ ) lowercase_ : Optional[int] = tokens + [tokenizer.unk_token] lowercase_ : int = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : List[str] = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=lowercase_ ) , [0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=lowercase_ ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , ) @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : List[Any] = self.tokenizer_class.from_pretrained("""roberta-base""" ) lowercase_ : Any = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase_ ) lowercase_ : Tuple = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase_ ) lowercase_ : Dict = tokenizer.encode( """sequence builders""" , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ ) lowercase_ : Optional[Any] = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ ) lowercase_ : List[str] = tokenizer.build_inputs_with_special_tokens(lowercase_ ) lowercase_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : List[str] = self.get_tokenizer() lowercase_ : Tuple = """Encode this sequence.""" lowercase_ : str = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments lowercase_ : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ ) lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(lowercase_ , lowercase_ ) lowercase_ : List[str] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ ) lowercase_ : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(lowercase_ , lowercase_ ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) lowercase_ : Dict = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) lowercase_ : Tuple = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(lowercase_ , lowercase_ ) # Testing spaces after special tokens lowercase_ : Dict = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ )} ) # mask token has a left space lowercase_ : Dict = tokenizer.convert_tokens_to_ids(lowercase_ ) lowercase_ : Dict = """Encode <mask> sequence""" lowercase_ : int = """Encode <mask>sequence""" lowercase_ : Optional[Any] = tokenizer.encode(lowercase_ ) lowercase_ : List[str] = encoded.index(lowercase_ ) lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(lowercase_ , lowercase_ ) lowercase_ : List[Any] = tokenizer.encode(lowercase_ ) lowercase_ : Optional[Any] = encoded.index(lowercase_ ) lowercase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): pass def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase_ : List[str] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) lowercase_ : Union[str, Any] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) lowercase_ : Tuple = """A, <mask> AllenNLP sentence.""" lowercase_ : str = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ ) lowercase_ : Union[str, Any] = tokenizer_p.encode_plus(lowercase_ , 
add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) lowercase_ : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) lowercase_ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( lowercase_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( lowercase_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): lowercase_ : Dict = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ ) lowercase_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) lowercase_ : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , lowercase_ ) self.assertEqual(post_processor_state["""add_prefix_space"""] , lowercase_ ) self.assertEqual(post_processor_state["""trim_offsets"""] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase_ : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` lowercase_ : List[str] = f'''{text_of_1_token} {text_of_1_token}''' lowercase_ : Optional[int] = self.rust_tokenizer_class.from_pretrained( lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ ) lowercase_ : List[Any] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase_ ) + 1, len(lowercase_ ) + 1 + len(lowercase_ )) , ) lowercase_ : Tuple = self.rust_tokenizer_class.from_pretrained( lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ ) lowercase_ : Optional[Any] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase_ ) + 1, len(lowercase_ ) + 1 + len(lowercase_ )) , ) lowercase_ : int = self.rust_tokenizer_class.from_pretrained( lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ ) lowercase_ : int = tokenizer_r(lowercase_ , 
return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase_ ), len(lowercase_ ) + 1 + len(lowercase_ )) , ) lowercase_ : List[str] = self.rust_tokenizer_class.from_pretrained( lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ ) lowercase_ : Union[str, Any] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase_ ), len(lowercase_ ) + 1 + len(lowercase_ )) , ) lowercase_ : Union[str, Any] = f''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) lowercase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained( lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ ) lowercase_ : List[str] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowercase_ ) + 1, 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , ) lowercase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ ) lowercase_ : List[str] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowercase_ ), 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , ) lowercase_ : Any = self.rust_tokenizer_class.from_pretrained( lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ ) lowercase_ : Any = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowercase_ ), 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
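The four combinations swept in the last test differ only in where the offsets start for a word preceded by a space; a small sketch that makes the behaviour visible (the checkpoint is real, and the printed offsets follow the assertions above):

from transformers import RobertaTokenizerFast

text = "hello hello"
for add_prefix_space, trim_offsets in [(True, True), (False, True), (True, False), (False, False)]:
    tok = RobertaTokenizerFast.from_pretrained(
        "roberta-base", add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
    )
    enc = tok(text, return_offsets_mapping=True, add_special_tokens=False)
    # with trim_offsets=True the second token starts after the space (offset 6),
    # with trim_offsets=False it includes the space (offset 5)
    print(add_prefix_space, trim_offsets, enc.offset_mapping[:2])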
'''simple docstring''' import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py _lowercase : Union[str, Any] = "src/transformers" _lowercase : str = "docs/source/en" _lowercase : Union[str, Any] = "." def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> int: with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowercase_ : Union[str, Any] = f.readlines() # Find the start prompt. lowercase_ : Optional[Any] = 0 while not lines[start_index].startswith(UpperCAmelCase__ ): start_index += 1 start_index += 1 lowercase_ : int = start_index while not lines[end_index].startswith(UpperCAmelCase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | _lowercase : int = "Model|Encoder|Decoder|ForConditionalGeneration" # Regexes that match TF/Flax/PT model names. _lowercase : str = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") _lowercase : Optional[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. _lowercase : int = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # This is to make sure the transformers module imported is the one in the repo. _lowercase : Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH) def lowerCamelCase ( UpperCAmelCase__ : int ) -> Any: lowercase_ : str = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , UpperCAmelCase__ ) return [m.group(0 ) for m in matches] def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> List[Any]: lowercase_ : Dict = 2 if text == """✅""" or text == """❌""" else len(UpperCAmelCase__ ) lowercase_ : List[str] = (width - text_length) // 2 lowercase_ : Dict = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def lowerCamelCase ( ) -> Any: lowercase_ : int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES lowercase_ : Any = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } lowercase_ : int = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. lowercase_ : List[Any] = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : List[str] = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Any = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Tuple = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Optional[int] = collections.defaultdict(UpperCAmelCase__ ) # Let's lookup through all transformers object (once). 
for attr_name in dir(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = None if attr_name.endswith("""Tokenizer""" ): lowercase_ : Optional[int] = slow_tokenizers lowercase_ : Union[str, Any] = attr_name[:-9] elif attr_name.endswith("""TokenizerFast""" ): lowercase_ : Optional[Any] = fast_tokenizers lowercase_ : Dict = attr_name[:-13] elif _re_tf_models.match(UpperCAmelCase__ ) is not None: lowercase_ : str = tf_models lowercase_ : str = _re_tf_models.match(UpperCAmelCase__ ).groups()[0] elif _re_flax_models.match(UpperCAmelCase__ ) is not None: lowercase_ : List[str] = flax_models lowercase_ : int = _re_flax_models.match(UpperCAmelCase__ ).groups()[0] elif _re_pt_models.match(UpperCAmelCase__ ) is not None: lowercase_ : Tuple = pt_models lowercase_ : Optional[int] = _re_pt_models.match(UpperCAmelCase__ ).groups()[0] if lookup_dict is not None: while len(UpperCAmelCase__ ) > 0: if attr_name in model_name_to_prefix.values(): lowercase_ : int = True break # Try again after removing the last word in the name lowercase_ : Optional[Any] = """""".join(camel_case_split(UpperCAmelCase__ )[:-1] ) # Let's build that table! lowercase_ : Dict = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) lowercase_ : Optional[Any] = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). lowercase_ : Union[str, Any] = [len(UpperCAmelCase__ ) + 2 for c in columns] lowercase_ : int = max([len(UpperCAmelCase__ ) for name in model_names] ) + 2 # Build the table per se lowercase_ : Tuple = """|""" + """|""".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for c, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + """|\n""" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n" lowercase_ : int = {True: """✅""", False: """❌"""} for name in model_names: lowercase_ : str = model_name_to_prefix[name] lowercase_ : Any = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for l, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + "|\n" return table def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any]=False ) -> str: lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = _find_text_in_file( filename=os.path.join(UpperCAmelCase__ , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , ) lowercase_ : Dict = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(UpperCAmelCase__ , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" ) if __name__ == "__main__": _lowercase : Any = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") _lowercase : Optional[Any] = parser.parse_args() check_model_table(args.fix_and_overwrite)
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING _lowercase : str = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase) class __magic_name__ ( _UpperCAmelCase): def __init__( self : str , *lowercase_ : Dict , **lowercase_ : List[Any] ): super().__init__(*lowercase_ , **lowercase_ ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : List[Any]=None , lowercase_ : Dict=None ): lowercase_ : Optional[Any] = {} lowercase_ : Tuple = {} if prompt is not None: lowercase_ : Tuple = prompt if generate_kwargs is not None: lowercase_ : List[str] = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: lowercase_ : List[Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) lowercase_ : str = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : List[Any] , lowercase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase_ : Optional[int] ): return super().__call__(lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[Any] , lowercase_ : Tuple=None ): lowercase_ : List[Any] = load_image(lowercase_ ) if prompt is not None: if not isinstance(lowercase_ , lowercase_ ): raise ValueError( f'''Received an invalid text input, got - {type(lowercase_ )} - but expected a single string. 
''' """Note also that one single text can be provided for conditional image to text generation.""" ) lowercase_ : List[Any] = self.model.config.model_type if model_type == "git": lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework ) lowercase_ : Union[str, Any] = self.tokenizer(text=lowercase_ , add_special_tokens=lowercase_ ).input_ids lowercase_ : int = [self.tokenizer.cls_token_id] + input_ids lowercase_ : List[Any] = torch.tensor(lowercase_ ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": lowercase_ : Union[str, Any] = self.image_processor(images=lowercase_ , header_text=lowercase_ , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework ) lowercase_ : List[str] = self.tokenizer(lowercase_ , return_tensors=self.framework ) model_inputs.update(lowercase_ ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: lowercase_ : List[str] = self.image_processor(images=lowercase_ , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: lowercase_ : str = None return model_inputs def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Dict , lowercase_ : Optional[Any]=None ): # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , lowercase_ ) and all(x is None for x in model_inputs["""input_ids"""] ) ): lowercase_ : Any = None if generate_kwargs is None: lowercase_ : Optional[Any] = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. lowercase_ : Dict = model_inputs.pop(self.model.main_input_name ) lowercase_ : Any = self.model.generate(lowercase_ , **lowercase_ , **lowercase_ ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[Any] ): lowercase_ : List[str] = [] for output_ids in model_outputs: lowercase_ : Union[str, Any] = { """generated_text""": self.tokenizer.decode( lowercase_ , skip_special_tokens=lowercase_ , ) } records.append(lowercase_ ) return records
'''simple docstring'''
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa


class CursorInfo(ctypes.Structure):
    # _fields_ is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
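A minimal usage sketch (editorial addition; it assumes the names hide, hide_cursor, and show_cursor as reconstructed in the file above):

import time

with hide():
    for step in range(3):
        print(f"working... {step}")
        time.sleep(0.1)
# The cursor is restored here even if the body raises, thanks to the try/finally.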
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
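A short sketch (not part of the source file) showing how the two derived properties follow from the default hyperparameters:

config = FalconConfig()
print(config.head_dim)  # hidden_size // num_attention_heads = 4544 // 71 = 64
print(config.rotary)    # True: rotary embeddings are implied whenever alibi is off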
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm _lowercase : int = logging.get_logger(__name__) @dataclass class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self : Optional[Any] , **lowercase_ : int ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowercase_ : Optional[int] = deprecated_arg[3:] setattr(self , lowercase_ , not kwargs.pop(lowercase_ ) ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) lowercase_ : Tuple = kwargs.pop("""torchscript""" , self.torchscript ) lowercase_ : List[Any] = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics ) lowercase_ : List[Any] = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level ) super().__init__(**lowercase_ ) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Trace the models using torchscript'''}) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''}) UpperCamelCase__ = field( default='''O1''', metadata={ '''help''': ( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. ''' '''See details at https://nvidia.github.io/apex/amp.html''' ) }, ) @cached_property def SCREAMING_SNAKE_CASE_ ( self : Tuple ): requires_backends(self , ["""torch"""] ) logger.info("""PyTorch: setting up devices""" ) if not self.cuda: lowercase_ : Optional[Any] = torch.device("""cpu""" ) lowercase_ : Tuple = 0 elif is_torch_tpu_available(): lowercase_ : Optional[int] = xm.xla_device() lowercase_ : str = 0 else: lowercase_ : int = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) lowercase_ : str = torch.cuda.device_count() return device, n_gpu @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return is_torch_tpu_available() and self.tpu @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): requires_backends(self , ["""torch"""] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): requires_backends(self , ["""torch"""] ) return self._setup_devices[0] @property def SCREAMING_SNAKE_CASE_ ( self : int ): requires_backends(self , ["""torch"""] ) return self._setup_devices[1] @property def SCREAMING_SNAKE_CASE_ ( self : int ): return self.n_gpu > 0
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging _lowercase : List[Any] = logging.get_logger(__name__) _lowercase : Optional[int] = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''blenderbot-small''' UpperCamelCase__ = ['''past_key_values'''] UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : Optional[int] , lowercase_ : Any=50265 , lowercase_ : Tuple=512 , lowercase_ : Tuple=8 , lowercase_ : Optional[int]=2048 , lowercase_ : Optional[int]=16 , lowercase_ : Dict=8 , lowercase_ : str=2048 , lowercase_ : List[Any]=16 , lowercase_ : Dict=0.0 , lowercase_ : int=0.0 , lowercase_ : Optional[Any]=True , lowercase_ : Tuple=True , lowercase_ : List[Any]="gelu" , lowercase_ : Optional[int]=512 , lowercase_ : Tuple=0.1 , lowercase_ : List[str]=0.0 , lowercase_ : int=0.0 , lowercase_ : Dict=0.02 , lowercase_ : Any=1 , lowercase_ : Any=False , lowercase_ : List[Any]=0 , lowercase_ : Tuple=1 , lowercase_ : int=2 , lowercase_ : Tuple=2 , **lowercase_ : List[Any] , ): lowercase_ : Optional[int] = vocab_size lowercase_ : Union[str, Any] = max_position_embeddings lowercase_ : List[Any] = d_model lowercase_ : Optional[int] = encoder_ffn_dim lowercase_ : List[Any] = encoder_layers lowercase_ : List[str] = encoder_attention_heads lowercase_ : List[Any] = decoder_ffn_dim lowercase_ : Optional[Any] = decoder_layers lowercase_ : List[str] = decoder_attention_heads lowercase_ : int = dropout lowercase_ : Optional[int] = attention_dropout lowercase_ : int = activation_dropout lowercase_ : int = activation_function lowercase_ : Any = init_std lowercase_ : str = encoder_layerdrop lowercase_ : List[str] = decoder_layerdrop lowercase_ : Optional[int] = use_cache lowercase_ : Any = encoder_layers lowercase_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , ) class __magic_name__ ( _UpperCAmelCase): @property def SCREAMING_SNAKE_CASE_ ( self : int ): if self.task in ["default", "seq2seq-lm"]: lowercase_ : List[Any] = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: lowercase_ : Any = {0: """batch"""} lowercase_ : List[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: lowercase_ : List[str] = {0: """batch""", 1: """decoder_sequence"""} lowercase_ : Optional[int] = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(lowercase_ , direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. 
lowercase_ : List[str] = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: lowercase_ , lowercase_ : Optional[Any] = self.num_layers for i in range(lowercase_ ): lowercase_ : List[Any] = {0: """batch""", 2: """past_sequence + sequence"""} lowercase_ : Any = {0: """batch""", 2: """past_sequence + sequence"""} else: lowercase_ : str = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property def SCREAMING_SNAKE_CASE_ ( self : str ): if self.task in ["default", "seq2seq-lm"]: lowercase_ : List[str] = super().outputs else: lowercase_ : str = super(lowercase_ , self ).outputs if self.use_past: lowercase_ , lowercase_ : int = self.num_layers for i in range(lowercase_ ): lowercase_ : str = {0: """batch""", 2: """past_sequence + sequence"""} lowercase_ : str = {0: """batch""", 2: """past_sequence + sequence"""} return common_outputs def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): lowercase_ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) # Generate decoder inputs lowercase_ : Dict = seq_length if not self.use_past else 1 lowercase_ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase_ : Any = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} lowercase_ : Optional[int] = dict(**lowercase_ , **lowercase_ ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowercase_ , lowercase_ : Optional[int] = common_inputs["""input_ids"""].shape lowercase_ : str = common_inputs["""decoder_input_ids"""].shape[1] lowercase_ , lowercase_ : Optional[int] = self.num_attention_heads lowercase_ : Optional[Any] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowercase_ : Dict = decoder_seq_length + 3 lowercase_ : Tuple = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowercase_ : Optional[int] = torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(lowercase_ , lowercase_ )] , dim=1 ) lowercase_ : Union[str, Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowercase_ , lowercase_ : Dict = self.num_layers lowercase_ : Any = min(lowercase_ , lowercase_ ) lowercase_ : Optional[Any] = max(lowercase_ , lowercase_ ) - min_num_layers lowercase_ : int = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder""" for _ in range(lowercase_ ): common_inputs["past_key_values"].append( ( torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), ) ) # TODO: test this. 
lowercase_ : Union[str, Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape for _ in range(lowercase_ , lowercase_ ): common_inputs["past_key_values"].append((torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) ) return common_inputs def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): lowercase_ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowercase_ , lowercase_ : str = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowercase_ : List[str] = seqlen + 2 lowercase_ , lowercase_ : List[str] = self.num_layers lowercase_ , lowercase_ : Optional[Any] = self.num_attention_heads lowercase_ : Optional[Any] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowercase_ : Optional[Any] = common_inputs["""attention_mask"""].dtype lowercase_ : List[Any] = torch.cat( [common_inputs["""attention_mask"""], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 ) lowercase_ : List[Any] = [ (torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(lowercase_ ) ] return common_inputs def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowercase_ : Tuple = compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowercase_ : Dict = tokenizer.num_special_tokens_to_add(lowercase_ ) lowercase_ : Optional[Any] = compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ ) # Generate dummy inputs according to compute batch and sequence lowercase_ : List[str] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size lowercase_ : str = dict(tokenizer(lowercase_ , return_tensors=lowercase_ ) ) return common_inputs def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): if self.task in ["default", "seq2seq-lm"]: lowercase_ : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) elif self.task == "causal-lm": lowercase_ : List[Any] = self._generate_dummy_inputs_for_causal_lm( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) else: lowercase_ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) return common_inputs def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : str ): if self.task in ["default", "seq2seq-lm"]: lowercase_ : Tuple = super()._flatten_past_key_values_(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) else: lowercase_ : List[Any] = super(lowercase_ , self )._flatten_past_key_values_( lowercase_ , lowercase_ , lowercase_ , lowercase_ )
'''simple docstring'''
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
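A worked example (editorial addition, using the function name as reconstructed above): "(2 + 1) * 3" is ["2", "1", "+", "3", "*"] in postfix, and the a * b < 0 branch makes integer division truncate toward zero instead of flooring:

assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9
assert evaluate_postfix(["7", "-2", "/"]) == -3  # truncation toward zero, not floor (-4)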
'''simple docstring'''
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
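Note (editorial): the loop above swaps two independently chosen random indices per pass, which is not the classic Fisher-Yates procedure and does not sample permutations uniformly. A sketch of the unbiased modern (Durstenfeld) variant, for contrast:

def fisher_yates_shuffle_unbiased(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # 0 <= j <= i
        data[i], data[j] = data[j], data[i]
    return data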
'''simple docstring''' from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _lowercase : List[Any] = logging.get_logger(__name__) def lowerCamelCase ( UpperCAmelCase__ : Union[tf.Tensor, np.ndarray] ) -> List[int]: if isinstance(UpperCAmelCase__ , np.ndarray ): return list(tensor.shape ) lowercase_ : Tuple = tf.shape(UpperCAmelCase__ ) if tensor.shape == tf.TensorShape(UpperCAmelCase__ ): return dynamic lowercase_ : Dict = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase__ )] def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[str] = None ) -> tf.Tensor: return tf.nn.softmax(logits=logits + 1e-9 , axis=UpperCAmelCase__ , name=UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=1e-5 , UpperCAmelCase__ : List[str]=-1 ) -> List[str]: # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" ) # Get mean and variance on the axis to be normalized lowercase_ , lowercase_ : List[str] = tf.nn.moments(UpperCAmelCase__ , axes=[axis] , keepdims=UpperCAmelCase__ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowercase_ : List[Any] = [1] * inputs.shape.rank lowercase_ : List[str] = shape_list(UpperCAmelCase__ )[axis] lowercase_ : List[str] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : List[Any] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) # Compute layer normalization using the batch_normalization # function. lowercase_ : str = tf.nn.batch_normalization( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , offset=UpperCAmelCase__ , scale=UpperCAmelCase__ , variance_epsilon=UpperCAmelCase__ , ) return outputs def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Any=-1 ) -> Dict: # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowercase_ : List[Any] = tf.shape(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowercase_ : Dict = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor ) -> tf.Tensor: if not isinstance(UpperCAmelCase__ , tf.Tensor ): lowercase_ : List[Any] = tf.convert_to_tensor(UpperCAmelCase__ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowercase_ : Any = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowercase_ : List[Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowercase_ : Optional[Any] = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : int , UpperCAmelCase__ : str = "input_ids" ) -> None: tf.debugging.assert_less( UpperCAmelCase__ , tf.cast(UpperCAmelCase__ , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase__ )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Any: lowercase_ : int = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowercase_ : Optional[Any] = [x for x in data if len(UpperCAmelCase__ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( """The following attributes cannot be saved to HDF5 file because """ F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) lowercase_ : Any = np.asarray(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = chunk_data else: lowercase_ : Any = data def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ) -> str: if name in group.attrs: lowercase_ : Optional[Any] = [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs[name]] else: lowercase_ : int = [] lowercase_ : Optional[int] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] ) chunk_id += 1 return data def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> Any: def _expand_single_ad_tensor(UpperCAmelCase__ : Optional[Any] ): if isinstance(UpperCAmelCase__ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(UpperCAmelCase__ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase__ )
'''simple docstring'''
from torch import nn


class ClassificationHead(nn.Module):
    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
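A minimal usage sketch (sizes are illustrative, not from the source):

import torch

head = ClassificationHead(class_size=5, embed_size=768)
hidden = torch.randn(4, 768)  # a batch of 4 pooled hidden states
logits = head(hidden)         # shape (4, 5): one score per class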
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
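Illustrative values (assuming prime_factors returns the factor list with multiplicity and is_square_free checks it for duplicates): mu(7) = -1 (one prime factor), mu(10) = 1 (two distinct primes), mu(12) = 0 (square factor 4):

for n in (7, 10, 12):
    print(n, mobius(n))  # expected: -1, 1, 0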
'''simple docstring'''
from math import log2


def lowest_set_bit_index(number: int) -> int:
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    return 0 if number == 0 else int(log2(number & -number))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
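How the bit trick works (worked example): in two's complement, -number flips every bit above the lowest set bit, so number & -number isolates that bit, and log2 of a power of two is its index:

number = 12                         # 0b1100
print(bin(number & -number))        # 0b100
print(int(log2(number & -number)))  # 2: index of the lowest set bit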
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
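A tiny brute-force cross-check (editorial addition, not in the source). With x = y + d, y, z = y - d in arithmetic progression, x^2 - y^2 - z^2 = y(4d - y), and Project Euler 135 states that n = 1155 is the least value with exactly ten solutions:

def brute_force_count(n: int) -> int:
    count = 0
    for y in range(1, n + 1):  # y(4d - y) = n bounds the middle term by n
        for d in range(1, y):  # z = y - d must stay positive
            if (y + d) ** 2 - y * y - (y - d) ** 2 == n:
                count += 1
    return count

print(brute_force_count(1155))  # expected: 10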
'''simple docstring''' import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class __magic_name__ ( unittest.TestCase): @parameterized.expand([(None,), ("""foo.json""",)] ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str ): lowercase_ : Union[str, Any] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ , config_name=lowercase_ ) lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_ ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , lowercase_ ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = AutoConfig.from_pretrained("""gpt2""" ) lowercase_ : List[Any] = GenerationConfig.from_model_config(lowercase_ ) lowercase_ : Optional[int] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(lowercase_ , lowercase_ ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = GenerationConfig() lowercase_ : int = { """max_new_tokens""": 1024, """foo""": """bar""", } lowercase_ : List[str] = copy.deepcopy(lowercase_ ) lowercase_ : Tuple = generation_config.update(**lowercase_ ) # update_kwargs was not modified (no side effects) self.assertEqual(lowercase_ , lowercase_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(lowercase_ , {"""foo""": """bar"""} ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Dict = GenerationConfig() lowercase_ : int = """bar""" with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir: generation_config.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , """bar""" ) lowercase_ : List[str] = GenerationConfig.from_model_config(lowercase_ ) assert not hasattr(lowercase_ , """foo""" ) # no new kwargs should be initialized if from config def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Optional[int] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , lowercase_ ) self.assertEqual(default_config.num_beams , 1 ) lowercase_ : Dict = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) 
self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , lowercase_ ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ ) lowercase_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , lowercase_ ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class __magic_name__ ( unittest.TestCase): @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any ): lowercase_ : int = TOKEN HfFolder.save_token(lowercase_ ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] ): try: delete_repo(token=cls._token , repo_id="""test-generation-config""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" ) except HTTPError: pass def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""test-generation-config""" , use_auth_token=self._token ) lowercase_ : List[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-generation-config""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="""test-generation-config""" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : int = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token ) lowercase_ : Optional[Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
'''simple docstring'''
import argparse

import torch

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
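An example invocation of the script above (editorial; the script file name and all paths are placeholders):

# python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./funnel/model.ckpt \
#     --config_file ./funnel/config.json \
#     --pytorch_dump_path ./funnel/pytorch_model.bin \
#     --base_model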
'''simple docstring'''
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
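A worked example (editorial addition): for [2, 3, -2, 4] the best subarray is [2, 3] with product 6; tracking the running minimum lets a later negative number flip a large negative product back to positive:

print(max_product_subarray([2, 3, -2, 4]))  # 6
print(max_product_subarray([-2, 0, -1]))    # 0 (the single-element subarray [0])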
'''simple docstring''' import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _lowercase : Optional[List[str]] = None _lowercase : str = "<" if sys.byteorder == "little" else ">" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _lowercase : Optional[int] = [ np.dtype("|b1"), np.dtype("|u1"), np.dtype("<u2"), np.dtype(">u2"), np.dtype("<i2"), np.dtype(">i2"), np.dtype("<u4"), np.dtype(">u4"), np.dtype("<i4"), np.dtype(">i4"), np.dtype("<f4"), np.dtype(">f4"), np.dtype("<f8"), np.dtype(">f8"), ] @dataclass class __magic_name__ : UpperCamelCase__ = True UpperCamelCase__ = None # Automatically constructed UpperCamelCase__ = "PIL.Image.Image" UpperCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()}) UpperCamelCase__ = field(default='''Image''', init=_UpperCAmelCase, repr=_UpperCAmelCase) def __call__( self : Tuple ): return self.pa_type def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if isinstance(lowercase_ , lowercase_ ): lowercase_ : int = np.array(lowercase_ ) if isinstance(lowercase_ , lowercase_ ): return {"path": value, "bytes": None} elif isinstance(lowercase_ , lowercase_ ): return {"path": None, "bytes": value} elif isinstance(lowercase_ , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(lowercase_ ) elif isinstance(lowercase_ , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(lowercase_ ) elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("""path""" )} elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )} else: raise ValueError( f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : dict , lowercase_ : List[str]=None ): if not self.decode: raise RuntimeError("""Decoding is disabled for this feature. 
Please use Image(decode=True) instead."""
            )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self):
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)


def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
    else:
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
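# A round-trip sketch for the helpers above, run against the public `datasets`
# package rather than this module directly (so the private names stay
# private). A uint8 HxWx3 array goes through encode_np_array on write; on read
# decode_example hands back a PIL image. Assumes `datasets`, `numpy`, and
# Pillow are installed.
import numpy as np
from datasets import Dataset, Features, Image as ImageFeature

arr = (np.random.rand(8, 8, 3) * 255).astype(np.uint8)
ds = Dataset.from_dict({"image": [arr]}, features=Features({"image": ImageFeature()}))
pil_image = ds[0]["image"]  # decode_example: bytes/path -> PIL.Image.Image
print(pil_image.size, pil_image.mode)

undecoded = ds.cast_column("image", ImageFeature(decode=False))
print(undecoded[0]["image"].keys())  # {"bytes", "path"} -- the layout flatten() exposes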
'''simple docstring''' from ..utils import DummyObject, requires_backends class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Tuple ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Optional[int] , **lowercase_ : Any ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : List[Any] , **lowercase_ : int ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : int , *lowercase_ : Any , **lowercase_ : Optional[Any] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : List[Any] , **lowercase_ : Optional[int] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Dict , *lowercase_ : Dict , **lowercase_ : str ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : List[str] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : Dict , **lowercase_ : List[str] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Optional[Any] , *lowercase_ : str , **lowercase_ : Optional[int] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : int , **lowercase_ : List[Any] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : List[str] , **lowercase_ : Union[str, Any] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : List[Any] , *lowercase_ : int , **lowercase_ : Dict ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : Union[str, Any] , **lowercase_ : Dict ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *lowercase_ : List[str] , **lowercase_ : List[str] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : Optional[Any] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : int , **lowercase_ : Any ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : Optional[int] , **lowercase_ : Optional[Any] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Dict , *lowercase_ : Tuple , **lowercase_ : Dict ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : List[Any] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ 
: Optional[int] , **lowercase_ : Tuple ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : List[Any] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Tuple , **lowercase_ : List[str] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : Union[str, Any] , **lowercase_ : Dict ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Optional[int] , *lowercase_ : Dict , **lowercase_ : Optional[Any] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : List[Any] , **lowercase_ : Tuple ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : Optional[int] , **lowercase_ : Tuple ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Optional[int] , *lowercase_ : Any , **lowercase_ : Dict ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : Any , **lowercase_ : Tuple ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *lowercase_ : Optional[Any] , **lowercase_ : str ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Any , *lowercase_ : Any , **lowercase_ : List[Any] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : Any , **lowercase_ : str ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : Union[str, Any] , **lowercase_ : int ): requires_backends(cls , ["""torch"""] ) def lowerCamelCase ( *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : List[Any] ) -> Optional[Any]: requires_backends(UpperCAmelCase__ , ["""torch"""] ) def lowerCamelCase ( *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Union[str, Any] ) -> str: requires_backends(UpperCAmelCase__ , ["""torch"""] ) def lowerCamelCase ( *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Any ) -> Optional[Any]: requires_backends(UpperCAmelCase__ , ["""torch"""] ) def lowerCamelCase ( *UpperCAmelCase__ : str , **UpperCAmelCase__ : Optional[int] ) -> str: requires_backends(UpperCAmelCase__ , ["""torch"""] ) def lowerCamelCase ( *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Tuple ) -> Optional[int]: requires_backends(UpperCAmelCase__ , ["""torch"""] ) def lowerCamelCase ( *UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[int] ) -> List[str]: requires_backends(UpperCAmelCase__ , ["""torch"""] ) def lowerCamelCase ( *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Optional[Any] ) -> int: requires_backends(UpperCAmelCase__ , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Any , *lowercase_ : List[Any] , **lowercase_ : Any ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : str , **lowercase_ : List[Any] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : str , 
*lowercase_ : str , **lowercase_ : Tuple ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Dict , *lowercase_ : Any , **lowercase_ : Dict ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : Any , **lowercase_ : int ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : str , **lowercase_ : str ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Tuple , *lowercase_ : Union[str, Any] , **lowercase_ : Union[str, Any] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Any , **lowercase_ : List[Any] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : int , **lowercase_ : Optional[int] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : str , *lowercase_ : Dict , **lowercase_ : int ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : Tuple , **lowercase_ : List[str] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : List[Any] , **lowercase_ : List[str] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Tuple , *lowercase_ : int , **lowercase_ : Union[str, Any] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : Tuple , **lowercase_ : Optional[Any] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Optional[int] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Any , *lowercase_ : List[Any] , **lowercase_ : Optional[int] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : int , **lowercase_ : Union[str, Any] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Dict ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : int , *lowercase_ : List[Any] , **lowercase_ : Tuple ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Optional[int] , **lowercase_ : Dict ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : List[str] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : Any ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : str , **lowercase_ : Tuple ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : int , **lowercase_ : List[str] ): requires_backends(cls , 
["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : Dict ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : Tuple , **lowercase_ : Optional[int] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Optional[int] , **lowercase_ : Optional[Any] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Optional[int] , *lowercase_ : Any , **lowercase_ : List[Any] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : Dict , **lowercase_ : Optional[Any] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : int , *lowercase_ : int , **lowercase_ : Tuple ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : Tuple , **lowercase_ : int ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : Any , **lowercase_ : Tuple ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : str , *lowercase_ : List[str] , **lowercase_ : Dict ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : int , **lowercase_ : Any ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : Dict , **lowercase_ : Union[str, Any] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Dict , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : Optional[Any] , **lowercase_ : Dict ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : Optional[int] , **lowercase_ : Any ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Any , *lowercase_ : Optional[int] , **lowercase_ : int ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : Dict , **lowercase_ : List[str] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *lowercase_ : List[Any] , **lowercase_ : List[Any] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : int , *lowercase_ : List[Any] , **lowercase_ : Union[str, Any] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : List[str] , **lowercase_ : str ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : Dict , **lowercase_ : Dict ): requires_backends(cls , ["""torch"""] ) class 
__magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : str ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : List[str] , **lowercase_ : Dict ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *lowercase_ : List[Any] , **lowercase_ : Optional[Any] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : str , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : Tuple , **lowercase_ : Optional[Any] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : Optional[int] , **lowercase_ : Union[str, Any] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Tuple , *lowercase_ : List[str] , **lowercase_ : str ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : str , **lowercase_ : Dict ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : str , **lowercase_ : str ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Any , *lowercase_ : Tuple , **lowercase_ : List[str] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : Optional[Any] , **lowercase_ : List[str] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : List[Any] , **lowercase_ : int ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Optional[int] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : List[Any] , **lowercase_ : Any ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : Any , **lowercase_ : Optional[Any] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : List[Any] , *lowercase_ : List[Any] , **lowercase_ : Tuple ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : str ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : Tuple , **lowercase_ : Union[str, Any] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : List[str] , *lowercase_ : List[str] , **lowercase_ : Union[str, Any] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Union[str, Any] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : Any , **lowercase_ : Optional[int] ): 
requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Tuple , *lowercase_ : Tuple , **lowercase_ : int ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : Tuple , **lowercase_ : List[Any] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : int , **lowercase_ : Tuple ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Union[str, Any] , *lowercase_ : str , **lowercase_ : List[Any] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : List[Any] , **lowercase_ : Union[str, Any] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : Dict , **lowercase_ : Union[str, Any] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Optional[int] , *lowercase_ : Union[str, Any] , **lowercase_ : str ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : Optional[int] , **lowercase_ : int ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : str , **lowercase_ : Union[str, Any] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Tuple , *lowercase_ : Tuple , **lowercase_ : str ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : Optional[Any] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : Tuple , **lowercase_ : List[str] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Union[str, Any] , *lowercase_ : Any , **lowercase_ : Union[str, Any] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : List[Any] , **lowercase_ : Optional[int] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : Dict , **lowercase_ : Optional[int] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Any , *lowercase_ : List[Any] , **lowercase_ : Any ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Tuple , **lowercase_ : Optional[int] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Tuple , **lowercase_ : int ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Optional[int] , *lowercase_ : Any , **lowercase_ : Union[str, Any] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : Optional[Any] , **lowercase_ : Dict ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : Dict , **lowercase_ : List[Any] ): 
requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : str , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : int , **lowercase_ : str ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Dict , *lowercase_ : Any , **lowercase_ : Optional[int] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : Any ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : List[str] , **lowercase_ : Dict ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Optional[Any] , *lowercase_ : List[Any] , **lowercase_ : Any ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : Optional[int] , **lowercase_ : Optional[Any] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : str , **lowercase_ : str ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : Any ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : Dict , **lowercase_ : str ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : Any , **lowercase_ : Tuple ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : int ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : Optional[Any] , **lowercase_ : Tuple ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : Dict , **lowercase_ : Optional[int] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Union[str, Any] , *lowercase_ : List[str] , **lowercase_ : str ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : List[str] , **lowercase_ : List[str] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : str , **lowercase_ : Optional[Any] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Tuple , *lowercase_ : Tuple , **lowercase_ : List[str] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : str , **lowercase_ : int ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Dict , **lowercase_ : List[str] ): requires_backends(cls , ["""torch"""] 
) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Any , *lowercase_ : Optional[int] , **lowercase_ : Optional[int] ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *lowercase_ : List[str] , **lowercase_ : Tuple ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Union[str, Any] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Any , *lowercase_ : List[Any] , **lowercase_ : str ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : str , **lowercase_ : Optional[int] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : int , **lowercase_ : List[str] ): requires_backends(cls , ["""torch"""] ) class __magic_name__ ( metaclass=_UpperCAmelCase): UpperCamelCase__ = ['''torch'''] def __init__( self : Dict , *lowercase_ : List[Any] , **lowercase_ : Any ): requires_backends(self , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Tuple , **lowercase_ : List[Any] ): requires_backends(cls , ["""torch"""] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : Any , **lowercase_ : Union[str, Any] ): requires_backends(cls , ["""torch"""] )
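# The file above repeats one pattern: placeholder ("dummy") classes whose only
# job is to fail loudly when an optional backend is missing. A minimal,
# self-contained sketch of that pattern follows; `requires_backends` and the
# `DummyObject` metaclass here are stand-ins modeled on `transformers.utils`,
# not imports from it.
import importlib.util


def requires_backends(obj, backends):
    # Raise if any of the named packages cannot be imported.
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the following backends: {missing}")


class DummyObject(type):
    # Route class-level attribute access through the backend check, so even
    # `Dummy.from_pretrained(...)` fails with a helpful message.
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)
        return super().__getattribute__(key)


class DummyModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)


try:
    DummyModel()  # raises ImportError unless the torch backend is importable
    print("torch available: the real class would have been imported instead")
except ImportError as err:
    print(err)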
'''simple docstring'''
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
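# A quick sanity sketch for the functions above. A point inside the set never
# diverges, so `get_distance` returns exactly 1 and both coloring functions
# map it to black; a point with |c| > 2 escapes on the first step.
def _sanity_check() -> None:
    inside = get_distance(0.0, 0.0, 50)   # the origin is in the Mandelbrot set
    outside = get_distance(2.0, 2.0, 50)  # escapes immediately
    assert inside == 1.0
    assert outside < 1.0
    assert get_black_and_white_rgb(inside) == (0, 0, 0)
    assert get_color_coded_rgb(inside) == (0, 0, 0)
    assert get_black_and_white_rgb(outside) == (255, 255, 255)


_sanity_check()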
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
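# A short sketch of what the `attribute_map` class attribute above buys you:
# `PretrainedConfig` aliases the generic names onto the GPT-2-style ones, so
# both spellings read and write the same field. Assumes `transformers` is
# installed; the config class ships with the library.
from transformers import DecisionTransformerConfig

config = DecisionTransformerConfig(n_positions=2048, n_head=2)
assert config.max_position_embeddings == config.n_positions == 2048
assert config.num_attention_heads == config.n_head == 2

config.num_hidden_layers = 6  # writes through the alias...
assert config.n_layer == 6    # ...to the underlying attribute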
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
'''simple docstring'''
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
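# The contract the test above pins down, restated as a minimal sketch:
# `is_small_dataset` is only ever True when a size is known AND a positive
# in-memory cap is configured. This stand-in takes the cap as a parameter,
# whereas the real function reads `datasets.config.IN_MEMORY_MAX_SIZE`.
from typing import Optional


def is_small_dataset_sketch(dataset_size: Optional[int], in_memory_max_size: int) -> bool:
    # An unknown size, or a cap of 0 ("never keep in memory"), yields False.
    return bool(dataset_size and in_memory_max_size) and dataset_size < in_memory_max_size


assert is_small_dataset_sketch(None, 900 * 2**20) is False
assert is_small_dataset_sketch(400 * 2**20, 0) is False
assert is_small_dataset_sketch(400 * 2**20, 900 * 2**20) is True
assert is_small_dataset_sketch(600 * 2**20, 100 * 2**20) is False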
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
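# What the `_LazyModule` swap at the bottom achieves, sketched with plain
# importlib so it runs without transformers: attribute access, not import,
# is what pays the import cost. This is a simplified stand-in, not the
# library class.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    # Maps public attribute names to module names and imports them on first access.
    def __init__(self, name: str, attr_to_module: dict):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr: str):
        submodule = self._attr_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(submodule)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache, so __getattr__ isn't hit again
        return value


# e.g. lazy.sha256 imports hashlib only when first touched
lazy = LazyModuleSketch("lazy", {"sha256": "hashlib"})
print(lazy.sha256(b"herbert").hexdigest())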
'''simple docstring''' import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class __magic_name__ ( _UpperCAmelCase): def __init__( self : List[str] , lowercase_ : Optional[int]=0.01 , lowercase_ : Any=1000 ): lowercase_ : List[Any] = p_stop lowercase_ : List[Any] = max_length def __iter__( self : Any ): lowercase_ : List[str] = 0 lowercase_ : Union[str, Any] = False while not stop and count < self.max_length: yield count count += 1 lowercase_ : Optional[Any] = random.random() < self.p_stop class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : List[str]=False , lowercase_ : List[Any]=True ): lowercase_ : List[str] = [ BatchSamplerShard(lowercase_ , 2 , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) for i in range(2 ) ] lowercase_ : Dict = [list(lowercase_ ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(lowercase_ ) for shard in batch_sampler_shards] , [len(lowercase_ ) for e in expected] ) self.assertListEqual(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): # Check the shards when the dataset is a round multiple of total batch size. lowercase_ : Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : int = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) lowercase_ : Any = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowercase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowercase_ , lowercase_ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. lowercase_ : Union[str, Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : Any = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) lowercase_ : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : Dict = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. lowercase_ : List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : List[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) lowercase_ : Optional[int] = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : Optional[int] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
lowercase_ : Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : List[str] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) lowercase_ : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : List[str] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) # Check the shards when the dataset is very small. lowercase_ : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : Optional[Any] = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) lowercase_ : Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : Any = [[], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): # Check the shards when the dataset is a round multiple of batch size. lowercase_ : Optional[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : Union[str, Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) lowercase_ : Union[str, Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowercase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size. lowercase_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : Any = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) lowercase_ : Dict = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : List[Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. lowercase_ : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : List[str] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) lowercase_ : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : str = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) # Check the shards when the dataset is very small. 
lowercase_ : int = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : Tuple = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) lowercase_ : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : List[str] = [[], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): # Check the shards when the dataset is a round multiple of total batch size. lowercase_ : List[str] = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : Optional[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) lowercase_ : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowercase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. lowercase_ : Optional[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : List[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) lowercase_ : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : Any = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. lowercase_ : Dict = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : List[str] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) lowercase_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : List[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. lowercase_ : Optional[int] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : int = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) lowercase_ : Dict = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : Optional[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is very small. 
lowercase_ : Tuple = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : List[str] = [[[0, 1]], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) lowercase_ : Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : Any = [[], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): # Check the shards when the dataset is a round multiple of batch size. lowercase_ : Tuple = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : Union[str, Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) lowercase_ : int = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowercase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size. lowercase_ : Dict = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : Any = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) lowercase_ : Optional[int] = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : Optional[int] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. lowercase_ : Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : int = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) lowercase_ : Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : int = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is very small. 
lowercase_ : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : int = [[[0, 1]], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) lowercase_ : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : str = [[], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : List[str] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] lowercase_ : str = [BatchSamplerShard(lowercase_ , 2 , lowercase_ , even_batches=lowercase_ ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Tuple=False , lowercase_ : Dict=2 , lowercase_ : Optional[int]=False ): random.seed(lowercase_ ) lowercase_ : List[str] = list(lowercase_ ) lowercase_ : Optional[Any] = [ IterableDatasetShard( lowercase_ , batch_size=lowercase_ , drop_last=lowercase_ , num_processes=lowercase_ , process_index=lowercase_ , split_batches=lowercase_ , ) for i in range(lowercase_ ) ] lowercase_ : List[str] = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(lowercase_ ) iterable_dataset_lists.append(list(lowercase_ ) ) lowercase_ : Dict = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size lowercase_ : str = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(lowercase_ ) , len(lowercase_ ) ) self.assertTrue(len(lowercase_ ) % shard_batch_size == 0 ) lowercase_ : int = [] for idx in range(0 , len(lowercase_ ) , lowercase_ ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(lowercase_ ) < len(lowercase_ ): reference += reference self.assertListEqual(lowercase_ , reference[: len(lowercase_ )] ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Tuple = 42 lowercase_ : Dict = RandomIterableDataset() self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) # Edge case with a very small dataset lowercase_ : Tuple = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) self.check_iterable_dataset_shards(lowercase_ , 
lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : List[Any] = BatchSampler(range(16 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : int = SkipBatchSampler(lowercase_ , 2 ) self.assertListEqual(list(lowercase_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : int = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[Any] = DataLoader(list(range(16 ) ) , batch_size=4 ) lowercase_ : int = skip_first_batches(lowercase_ , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : int = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(lowercase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(lowercase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): Accelerator() lowercase_ : Tuple = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(lowercase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(lowercase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
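# The sharding rule the tests above exercise, in miniature: process i of n
# takes batch i, i+n, i+2n, ... and, with even_batches=True, wraps around to
# the start of the dataset so every shard yields the same number of batches.
# This is a reimplementation of that contract for the case where every batch
# is complete, not accelerate's BatchSamplerShard itself (which also tops up
# an incomplete final batch with leading samples).
from typing import List


def shard_batches(batches: List[list], num_processes: int, process_index: int,
                  even_batches: bool = True) -> List[list]:
    padded = list(batches)
    i = 0
    while even_batches and len(padded) % num_processes != 0:
        # wrap around to the start until the count divides evenly
        padded.append(batches[i % len(batches)])
        i += 1
    return padded[process_index::num_processes]


batches = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11], [12, 13, 14], [15, 16, 17], [18, 19, 20]]
assert shard_batches(batches, 2, 0) == [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
assert shard_batches(batches, 2, 1) == [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]]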
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _lowercase : str = 16 _lowercase : int = 32 def lowerCamelCase ( UpperCAmelCase__ : Accelerator , UpperCAmelCase__ : int = 16 ) -> str: lowercase_ : List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowercase_ : List[Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(UpperCAmelCase__ : str ): # max_length=None => use the model max length (it's actually the default) lowercase_ : int = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowercase_ : int = datasets.map( UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase_ : List[Any] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(UpperCAmelCase__ : Optional[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. lowercase_ : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowercase_ : List[Any] = 16 elif accelerator.mixed_precision != "no": lowercase_ : Optional[int] = 8 else: lowercase_ : int = None return tokenizer.pad( UpperCAmelCase__ , padding="""longest""" , max_length=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
lowercase_ : str = DataLoader( tokenized_datasets["""train"""] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ ) lowercase_ : List[Any] = DataLoader( tokenized_datasets["""validation"""] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _lowercase : List[str] = mocked_dataloaders # noqa: F811 def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : Dict ) -> Optional[int]: # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , UpperCAmelCase__ ) == "1": lowercase_ : Dict = 2 # Initialize accelerator lowercase_ : List[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase_ : Optional[int] = config["""lr"""] lowercase_ : str = int(config["""num_epochs"""] ) lowercase_ : int = int(config["""seed"""] ) lowercase_ : List[str] = int(config["""batch_size"""] ) lowercase_ : Optional[int] = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation lowercase_ : str = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowercase_ : Optional[int] = batch_size // MAX_GPU_BATCH_SIZE lowercase_ : Dict = MAX_GPU_BATCH_SIZE set_seed(UpperCAmelCase__ ) lowercase_ , lowercase_ : List[Any] = get_dataloaders(UpperCAmelCase__ , UpperCAmelCase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase_ : Tuple = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=UpperCAmelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowercase_ : Dict = model.to(accelerator.device ) # Instantiate optimizer lowercase_ : Union[str, Any] = AdamW(params=model.parameters() , lr=UpperCAmelCase__ ) # Instantiate scheduler lowercase_ : int = get_linear_schedule_with_warmup( optimizer=UpperCAmelCase__ , num_warmup_steps=100 , num_training_steps=(len(UpperCAmelCase__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = accelerator.prepare( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # Now we train the model for epoch in range(UpperCAmelCase__ ): model.train() for step, batch in enumerate(UpperCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        " between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10"
        " and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
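
# Added sketch, not part of the original example: the eval loop above notes that
# the manual last-batch truncation can be replaced by `Accelerator.gather_for_metrics`
# (available in accelerate >= 0.12). A minimal sketch of the loop rewritten around it
# follows; the helper name `evaluate_with_gather_for_metrics` is hypothetical and the
# function is illustrative only, not wired into the script above.
def evaluate_with_gather_for_metrics(accelerator, model, eval_dataloader, metric):
    model.eval()
    for batch in eval_dataloader:
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # gather_for_metrics drops the duplicate samples that distributed samplers
        # append to make the last batch evenly divisible, so no manual
        # `samples_seen` bookkeeping is needed.
        predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()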
'''Remove duplicate (byte-identical) initializers from an ONNX model to reduce its file size.'''

import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # Two initializers are duplicates if they are identical apart from their names,
    # so blank the names out before comparing and restore them afterwards.
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    # Subgraphs of control-flow nodes have to be rewritten as well.
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        # Remove the duplicate initializer and point every node that consumed it
        # at the reference initializer instead.
        model_without_ext.graph.initializer.remove(inits[i])
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    '''
    Removes duplicate initializers from the model at `onnx_file_path` and saves
    the optimized copy next to it as `optimized_<file name>`. Returns the new path.
    '''
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                # Estimate the saved memory from the element count and dtype width
                # (1 = float32, 6 = int32, 7 = int64, 11 = double).
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)

    return new_model_path
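
# Added usage sketch, not part of the original script: a minimal driver showing
# how `remove_dup_initializers` is meant to be called. "model.onnx" is a
# placeholder path, not a file shipped with this script.
if __name__ == "__main__":
    optimized_path = remove_dup_initializers("model.onnx")
    optimized = onnx.load(optimized_path)
    print("initializers after dedup:", len(optimized.graph.initializer))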