Dataset schema (five columns per row):

  code                     string  (lengths 87 to 55.2k)
  code_codestyle           int64   (0 to 349)
  style_context            string  (lengths 135 to 49.1k)
  style_context_codestyle  int64   (0 to 349)
  label                    int64   (0 or 1)
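A schema like this can be loaded and inspected with the `datasets` library. A minimal sketch; the dataset id below is a placeholder, since the dump does not name its source:

```python
from datasets import load_dataset

# "org/code-style-pairs" is a placeholder id; the dump does not identify
# the actual dataset repository.
ds = load_dataset("org/code-style-pairs", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # each code field is a full source file on one line
```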
Row 1
code:
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Optional[Any] = ['''image_processor''', '''tokenizer'''] _snake_case : Optional[Any] = '''ViTImageProcessor''' _snake_case : Optional[int] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> List[str]: UpperCAmelCase_ : str = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , _UpperCamelCase , ) UpperCAmelCase_ : List[str] = kwargs.pop('feature_extractor' ) UpperCAmelCase_ : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(_UpperCamelCase , _UpperCamelCase ) def __call__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> Dict: if text is None and visual_prompt is None and images is None: raise ValueError('You have to specify either text, visual prompt or images.' ) if text is not None and visual_prompt is not None: raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' ) if text is not None: UpperCAmelCase_ : List[str] = self.tokenizer(_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase ) if visual_prompt is not None: UpperCAmelCase_ : Optional[Any] = self.image_processor(_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase ) if images is not None: UpperCAmelCase_ : str = self.image_processor(_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase ) if visual_prompt is not None and images is not None: UpperCAmelCase_ : Tuple = { 'pixel_values': image_features.pixel_values, 'conditional_pixel_values': prompt_features.pixel_values, } return encoding elif text is not None and images is not None: UpperCAmelCase_ : Any = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: UpperCAmelCase_ : List[Any] = { 'conditional_pixel_values': prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**_UpperCamelCase ) , tensor_type=_UpperCamelCase ) def __UpperCAmelCase ( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict: return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase ) def __UpperCAmelCase ( self , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple: return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase ) @property def __UpperCAmelCase ( self ) -> Any: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCamelCase , ) return self.image_processor_class @property def __UpperCAmelCase ( self ) -> List[str]: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCamelCase , ) return self.image_processor
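This `code` field is an obfuscated image/text processor whose shape (paired `image_processor`/`tokenizer` attributes, a `visual_prompt` argument, the "exactly one type of prompt" check, `conditional_pixel_values`) matches transformers' CLIPSegProcessor. A minimal usage sketch under that assumption; the checkpoint is the standard public one but still illustrative here:

```python
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.new("RGB", (352, 352))  # dummy image stands in for real input

# Text prompts and images are batched together into one encoding.
inputs = processor(text=["a cat", "a remote"], images=[image, image], return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']
```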
code_codestyle: 29

style_context:
def lowercase__ ( __snake_case : int , __snake_case : int ): '''simple docstring''' if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive' ) UpperCAmelCase_ : Tuple = str(bin(__snake_case ) )[2:] # remove the leading "0b" UpperCAmelCase_ : Union[str, Any] = str(bin(__snake_case ) )[2:] # remove the leading "0b" UpperCAmelCase_ : List[Any] = max(len(__snake_case ) , len(__snake_case ) ) return "0b" + "".join( str(int(char_a == '1' and char_b == '1' ) ) for char_a, char_b in zip(a_binary.zfill(__snake_case ) , b_binary.zfill(__snake_case ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
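The `style_context` above is a bitwise-AND helper, but the obfuscation pass (identifier reads rewritten to `__snake_case`) leaves it unrunnable as printed. A de-obfuscated equivalent, reconstructed from the body and its comments:

```python
def binary_and(a: int, b: int) -> str:
    """Bitwise AND of two non-negative ints, returned as a '0b...' string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )

print(binary_and(25, 9))   # 0b01001  (25 & 9 == 9)
print(binary_and(25, 32))  # 0b000000 (no common bits)
```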
style_context_codestyle: 29
label: 1

Row 2
code:
import numpy as np def lowercase__ ( __snake_case : np.array ): '''simple docstring''' return 1 / (1 + np.exp(-vector )) if __name__ == "__main__": import doctest doctest.testmod()
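This `code` field is a one-line numpy sigmoid; a runnable, de-obfuscated form with a quick check (the original's argument name `vector` is visible in the body):

```python
import numpy as np

def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Elementwise logistic function: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector))

print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # ~[0.2689, 0.5, 0.7311]
```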
code_codestyle: 29

style_context:
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'vocab.txt'} __UpperCAmelCase = { 'vocab_file': { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt', } } __UpperCAmelCase = { 'YituTech/conv-bert-base': 512, 'YituTech/conv-bert-medium-small': 512, 'YituTech/conv-bert-small': 512, } __UpperCAmelCase = { 'YituTech/conv-bert-base': {'do_lower_case': True}, 'YituTech/conv-bert-medium-small': {'do_lower_case': True}, 'YituTech/conv-bert-small': {'do_lower_case': True}, } class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Optional[int] = VOCAB_FILES_NAMES _snake_case : int = PRETRAINED_VOCAB_FILES_MAP _snake_case : Dict = PRETRAINED_INIT_CONFIGURATION _snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : Any = ConvBertTokenizer def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict: super().__init__( _UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , ) UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _UpperCamelCase ) != do_lower_case or normalizer_state.get('strip_accents' , _UpperCamelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _UpperCamelCase ) != tokenize_chinese_chars ): UpperCAmelCase_ : Any = getattr(_UpperCamelCase , normalizer_state.pop('type' ) ) UpperCAmelCase_ : str = do_lower_case UpperCAmelCase_ : List[Any] = strip_accents UpperCAmelCase_ : str = tokenize_chinese_chars UpperCAmelCase_ : Tuple = normalizer_class(**_UpperCamelCase ) UpperCAmelCase_ : Any = do_lower_case def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[str]: UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]: UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id] UpperCAmelCase_ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]: UpperCAmelCase_ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase ) return tuple(_UpperCamelCase )
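The `style_context` above mirrors transformers' ConvBertTokenizerFast. A usage sketch, assuming network access to the listed checkpoint:

```python
from transformers import ConvBertTokenizerFast

tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
enc = tok("Hello world")
print(tok.convert_ids_to_tokens(enc["input_ids"]))
# ['[CLS]', 'hello', 'world', '[SEP]'] -- do_lower_case=True by default
```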
style_context_codestyle: 29
label: 1

Row 3
code:
from ....configuration_utils import PretrainedConfig from ....utils import logging __UpperCAmelCase = logging.get_logger(__name__) # TODO: upload to AWS __UpperCAmelCase = { 'yjernite/retribert-base-uncased': ( 'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json' ), } class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Optional[Any] = '''retribert''' def __init__( self , _UpperCamelCase=3_0_5_2_2 , _UpperCamelCase=7_6_8 , _UpperCamelCase=8 , _UpperCamelCase=1_2 , _UpperCamelCase=3_0_7_2 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=5_1_2 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=1E-12 , _UpperCamelCase=True , _UpperCamelCase=1_2_8 , _UpperCamelCase=0 , **_UpperCamelCase , ) -> str: super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase ) UpperCAmelCase_ : List[str] = vocab_size UpperCAmelCase_ : Any = hidden_size UpperCAmelCase_ : Tuple = num_hidden_layers UpperCAmelCase_ : Union[str, Any] = num_attention_heads UpperCAmelCase_ : Dict = hidden_act UpperCAmelCase_ : Optional[Any] = intermediate_size UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase_ : List[str] = attention_probs_dropout_prob UpperCAmelCase_ : int = max_position_embeddings UpperCAmelCase_ : Union[str, Any] = type_vocab_size UpperCAmelCase_ : Tuple = initializer_range UpperCAmelCase_ : int = layer_norm_eps UpperCAmelCase_ : Any = share_encoders UpperCAmelCase_ : int = projection_dim
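This `code` field is the RetriBERT configuration (`model_type` "retribert"). A sketch of instantiating it, assuming a transformers version that still exposes the (since-deprecated) RetriBERT classes:

```python
from transformers import RetriBertConfig

cfg = RetriBertConfig()  # defaults match the __init__ signature above
print(cfg.model_type, cfg.hidden_size, cfg.projection_dim)  # retribert 768 128
```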
code_codestyle: 29

style_context:
from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'snap-research/efficientformer-l1-300': ( 'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json' ), } class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Optional[int] = '''efficientformer''' def __init__( self , _UpperCamelCase = [3, 2, 6, 4] , _UpperCamelCase = [4_8, 9_6, 2_2_4, 4_4_8] , _UpperCamelCase = [True, True, True, True] , _UpperCamelCase = 4_4_8 , _UpperCamelCase = 3_2 , _UpperCamelCase = 4 , _UpperCamelCase = 7 , _UpperCamelCase = 5 , _UpperCamelCase = 8 , _UpperCamelCase = 4 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1_6 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 2 , _UpperCamelCase = 1 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1 , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1E-5 , _UpperCamelCase = "gelu" , _UpperCamelCase = 0.02 , _UpperCamelCase = 1E-12 , _UpperCamelCase = 2_2_4 , _UpperCamelCase = 1E-05 , **_UpperCamelCase , ) -> None: super().__init__(**_UpperCamelCase ) UpperCAmelCase_ : int = hidden_act UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase_ : Tuple = hidden_sizes UpperCAmelCase_ : Union[str, Any] = num_hidden_layers UpperCAmelCase_ : List[str] = num_attention_heads UpperCAmelCase_ : List[Any] = initializer_range UpperCAmelCase_ : int = layer_norm_eps UpperCAmelCase_ : List[str] = patch_size UpperCAmelCase_ : Union[str, Any] = num_channels UpperCAmelCase_ : Optional[Any] = depths UpperCAmelCase_ : List[Any] = mlp_expansion_ratio UpperCAmelCase_ : List[str] = downsamples UpperCAmelCase_ : List[Any] = dim UpperCAmelCase_ : Tuple = key_dim UpperCAmelCase_ : Optional[int] = attention_ratio UpperCAmelCase_ : str = resolution UpperCAmelCase_ : Dict = pool_size UpperCAmelCase_ : Union[str, Any] = downsample_patch_size UpperCAmelCase_ : List[str] = downsample_stride UpperCAmelCase_ : List[str] = downsample_pad UpperCAmelCase_ : Any = drop_path_rate UpperCAmelCase_ : Dict = num_metaad_blocks UpperCAmelCase_ : Dict = distillation UpperCAmelCase_ : int = use_layer_scale UpperCAmelCase_ : Any = layer_scale_init_value UpperCAmelCase_ : Any = image_size UpperCAmelCase_ : Dict = batch_norm_eps
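The `style_context` above is the EfficientFormer configuration; its defaults are visible in the obfuscated signature. A quick instantiation sketch against the real class:

```python
from transformers import EfficientFormerConfig

cfg = EfficientFormerConfig()
print(cfg.depths)        # [3, 2, 6, 4]
print(cfg.hidden_sizes)  # [48, 96, 224, 448]
```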
style_context_codestyle: 29
label: 1

Row 4
code:
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase = { 'configuration_bigbird_pegasus': [ 'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BigBirdPegasusConfig', 'BigBirdPegasusOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST', 'BigBirdPegasusForCausalLM', 'BigBirdPegasusForConditionalGeneration', 'BigBirdPegasusForQuestionAnswering', 'BigBirdPegasusForSequenceClassification', 'BigBirdPegasusModel', 'BigBirdPegasusPreTrainedModel', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
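This `code` field is a standard transformers lazy-init `__init__.py`: importing the package is cheap, and the heavy `modeling_*` submodule loads only when an attribute is first touched. A sketch of the observable behavior:

```python
import transformers

# Attribute access on the package triggers the real submodule import.
cls = transformers.BigBirdPegasusForConditionalGeneration
print(cls.__module__)
# transformers.models.bigbird_pegasus.modeling_bigbird_pegasus
```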
code_codestyle: 29

style_context:
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n' @dataclass class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Union[PIL.Image.Image, np.ndarray] class lowerCamelCase (_snake_case ): '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any: super().__init__() self.register_modules( prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]: if latents is None: UpperCAmelCase_ : str = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase ) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" ) UpperCAmelCase_ : Tuple = latents.to(_UpperCamelCase ) UpperCAmelCase_ : Tuple = latents * scheduler.init_noise_sigma return latents def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) UpperCAmelCase_ : int = torch.device(f"cuda:{gpu_id}" ) UpperCAmelCase_ : int = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_UpperCamelCase , _UpperCamelCase ) @property def __UpperCAmelCase ( self ) -> int: if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_UpperCamelCase , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> str: if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ): UpperCAmelCase_ : int = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else 
torch.stack(_UpperCamelCase , axis=0 ) if not isinstance(_UpperCamelCase , torch.Tensor ): UpperCAmelCase_ : Optional[int] = self.image_processor(_UpperCamelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 ) UpperCAmelCase_ : Tuple = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = self.image_encoder(_UpperCamelCase )['last_hidden_state'] UpperCAmelCase_ : Union[str, Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 UpperCAmelCase_ : List[str] = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase_ : Dict = torch.zeros_like(_UpperCamelCase ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase_ : Optional[int] = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_UpperCamelCase ) def __call__( self , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = 2_5 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 4.0 , _UpperCamelCase = 6_4 , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> Union[str, Any]: if isinstance(_UpperCamelCase , PIL.Image.Image ): UpperCAmelCase_ : Tuple = 1 elif isinstance(_UpperCamelCase , torch.Tensor ): UpperCAmelCase_ : str = image.shape[0] elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): UpperCAmelCase_ : Optional[int] = len(_UpperCamelCase ) else: raise ValueError( f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}" ) UpperCAmelCase_ : Tuple = self._execution_device UpperCAmelCase_ : str = batch_size * num_images_per_prompt UpperCAmelCase_ : str = guidance_scale > 1.0 UpperCAmelCase_ : str = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # prior self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase ) UpperCAmelCase_ : int = self.scheduler.timesteps UpperCAmelCase_ : int = self.prior.config.num_embeddings UpperCAmelCase_ : Any = self.prior.config.embedding_dim UpperCAmelCase_ : List[str] = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim UpperCAmelCase_ : List[Any] = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase ) for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase_ : Optional[Any] = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : int = self.prior( _UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding # remove the variance UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 ) UpperCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred 
- noise_pred_uncond) UpperCAmelCase_ : List[str] = self.scheduler.step( _UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=_UpperCamelCase ) UpperCAmelCase_ : List[Any] = [] for i, latent in enumerate(_UpperCamelCase ): print() UpperCAmelCase_ : List[str] = self.renderer.decode( latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , ) images.append(_UpperCamelCase ) UpperCAmelCase_ : Optional[int] = torch.stack(_UpperCamelCase ) if output_type not in ["np", "pil"]: raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" ) UpperCAmelCase_ : Dict = images.cpu().numpy() if output_type == "pil": UpperCAmelCase_ : List[str] = [self.numpy_to_pil(_UpperCamelCase ) for image in images] # Offload last model to CPU if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_UpperCamelCase )
style_context_codestyle: 29
label: 1

Row 5
code:
import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase=1_3 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=9_9 , _UpperCamelCase=6_4 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=6_4 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=5_1_2 , _UpperCamelCase=1_6 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=None , ) -> Union[str, Any]: UpperCAmelCase_ : Any = parent UpperCAmelCase_ : Union[str, Any] = batch_size UpperCAmelCase_ : List[Any] = seq_length UpperCAmelCase_ : Optional[Any] = is_training UpperCAmelCase_ : Any = use_input_mask UpperCAmelCase_ : Optional[Any] = use_token_type_ids UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : List[str] = vocab_size UpperCAmelCase_ : Dict = hidden_size UpperCAmelCase_ : Tuple = num_hidden_layers UpperCAmelCase_ : Optional[int] = num_attention_heads UpperCAmelCase_ : Optional[Any] = intermediate_size UpperCAmelCase_ : Optional[Any] = hidden_act UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : Tuple = max_position_embeddings UpperCAmelCase_ : int = type_vocab_size UpperCAmelCase_ : int = type_sequence_label_size UpperCAmelCase_ : Any = initializer_range UpperCAmelCase_ : List[Any] = num_labels UpperCAmelCase_ : List[Any] = num_choices UpperCAmelCase_ : Any = scope def __UpperCAmelCase ( self ) -> List[str]: return MPNetConfig.from_pretrained('microsoft/mpnet-base' ) def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Optional[Any] = None if self.use_input_mask: UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : List[Any] = None if self.use_labels: UpperCAmelCase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : int = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self ) -> Dict: return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def __UpperCAmelCase ( self , _UpperCamelCase , 
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]: UpperCAmelCase_ : Optional[int] = MPNetModel(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() UpperCAmelCase_ : List[Any] = model(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : str = model(_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]: UpperCAmelCase_ : Any = MPNetForQuestionAnswering(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() UpperCAmelCase_ : Dict = model( _UpperCamelCase , attention_mask=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict: UpperCAmelCase_ : Optional[int] = self.num_labels UpperCAmelCase_ : Dict = MPNetForSequenceClassification(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() UpperCAmelCase_ : str = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]: UpperCAmelCase_ : Tuple = self.num_choices UpperCAmelCase_ : Tuple = MPNetForMultipleChoice(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() UpperCAmelCase_ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase_ : Dict = model( _UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]: UpperCAmelCase_ : Tuple = self.num_labels UpperCAmelCase_ : Union[str, Any] = MPNetForTokenClassification(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() UpperCAmelCase_ : Optional[int] = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCAmelCase ( self ) -> Dict: UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs() ((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = config_and_inputs UpperCAmelCase_ : int = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ): '''simple docstring''' _snake_case : Optional[Any] = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, 
MPNetModel, ) if is_torch_available() else () ) _snake_case : Optional[Any] = ( { '''feature-extraction''': MPNetModel, '''fill-mask''': MPNetForMaskedLM, '''question-answering''': MPNetForQuestionAnswering, '''text-classification''': MPNetForSequenceClassification, '''token-classification''': MPNetForTokenClassification, '''zero-shot''': MPNetForSequenceClassification, } if is_torch_available() else {} ) _snake_case : Union[str, Any] = False _snake_case : Dict = True def __UpperCAmelCase ( self ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = MPNetModelTester(self ) UpperCAmelCase_ : Optional[int] = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 ) def __UpperCAmelCase ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def __UpperCAmelCase ( self ) -> Any: UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*_UpperCamelCase ) def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Union[str, Any]: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*_UpperCamelCase ) @require_torch class lowerCamelCase (unittest.TestCase ): '''simple docstring''' @slow def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = MPNetModel.from_pretrained('microsoft/mpnet-base' ) UpperCAmelCase_ : Optional[Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) UpperCAmelCase_ : Union[str, Any] = model(_UpperCamelCase )[0] UpperCAmelCase_ : int = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , _UpperCamelCase ) UpperCAmelCase_ : Tuple = torch.tensor( [[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1E-4 ) )
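The slow integration test at the end of this `code` field pins down concrete values; the same forward pass can be reproduced directly (requires torch and the microsoft/mpnet-base weights):

```python
import torch
from transformers import MPNetModel

model = MPNetModel.from_pretrained("microsoft/mpnet-base")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
with torch.no_grad():
    output = model(input_ids)[0]
print(output.shape)  # torch.Size([1, 11, 768]), as asserted in the test above
```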
code_codestyle: 29

style_context:
import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ): '''simple docstring''' _snake_case : Union[str, Any] = IFImgaImgSuperResolutionPipeline _snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''} _snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} ) _snake_case : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''} def __UpperCAmelCase ( self ) -> Optional[Any]: return self._get_superresolution_dummy_components() def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Any: if str(_UpperCamelCase ).startswith('mps' ): UpperCAmelCase_ : List[Any] = torch.manual_seed(_UpperCamelCase ) else: UpperCAmelCase_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase ) UpperCAmelCase_ : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase ) UpperCAmelCase_ : Tuple = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'original_image': original_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __UpperCAmelCase ( self ) -> Any: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __UpperCAmelCase ( self ) -> Dict: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __UpperCAmelCase ( self ) -> str: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __UpperCAmelCase ( self ) -> List[Any]: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __UpperCAmelCase ( self ) -> Union[str, Any]: self._test_save_load_local() def __UpperCAmelCase ( self ) -> Dict: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
style_context_codestyle: 29
label: 1

Row 6
code:
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import importlib.metadata import json import os from dataclasses import dataclass from typing import Any, Dict, Union from packaging import version from ..utils import is_torch_available, logging if is_torch_available(): import torch __UpperCAmelCase = logging.get_logger(__name__) @dataclass class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=6.0 , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=None , _UpperCamelCase="fp4" , _UpperCamelCase=False , **_UpperCamelCase , ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = load_in_abit UpperCAmelCase_ : Any = load_in_abit UpperCAmelCase_ : List[Any] = llm_inta_threshold UpperCAmelCase_ : Tuple = llm_inta_skip_modules UpperCAmelCase_ : Tuple = llm_inta_enable_fpaa_cpu_offload UpperCAmelCase_ : Optional[Any] = llm_inta_has_fpaa_weight UpperCAmelCase_ : Union[str, Any] = bnb_abit_quant_type UpperCAmelCase_ : Dict = bnb_abit_use_double_quant if bnb_abit_compute_dtype is None: UpperCAmelCase_ : int = torch.floataa elif isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : str = getattr(_UpperCamelCase , _UpperCamelCase ) elif isinstance(_UpperCamelCase , torch.dtype ): UpperCAmelCase_ : Optional[Any] = bnb_abit_compute_dtype else: raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' ) self.post_init() def __UpperCAmelCase ( self ) -> int: if not isinstance(self.llm_inta_threshold , _UpperCamelCase ): raise ValueError('llm_int8_threshold must be a float' ) if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , _UpperCamelCase ): raise ValueError('llm_int8_skip_modules must be a list of strings' ) if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , _UpperCamelCase ): raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' ) if not isinstance(self.llm_inta_has_fpaa_weight , _UpperCamelCase ): raise ValueError('llm_int8_has_fp16_weight must be a boolean' ) if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ): raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' ) if not isinstance(self.bnb_abit_quant_type , _UpperCamelCase ): raise ValueError('bnb_4bit_quant_type must be a string' ) if not isinstance(self.bnb_abit_use_double_quant , _UpperCamelCase ): raise ValueError('bnb_4bit_use_double_quant must be a boolean' ) if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse( '0.39.0' ): raise ValueError( '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' ) def __UpperCAmelCase ( self ) -> str: return self.load_in_abit or self.load_in_abit def __UpperCAmelCase ( self ) -> List[str]: if self.load_in_abit: return "llm_int8" elif self.load_in_abit and self.bnb_abit_quant_type == "fp4": return 
"fp4" elif self.load_in_abit and self.bnb_abit_quant_type == "nf4": return "nf4" else: return None @classmethod def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ) -> Tuple: UpperCAmelCase_ : str = cls(**_UpperCamelCase ) UpperCAmelCase_ : Dict = [] for key, value in kwargs.items(): if hasattr(_UpperCamelCase , _UpperCamelCase ): setattr(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) to_remove.append(_UpperCamelCase ) for key in to_remove: kwargs.pop(_UpperCamelCase , _UpperCamelCase ) if return_unused_kwargs: return config, kwargs else: return config def __UpperCAmelCase ( self , _UpperCamelCase ) -> int: with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as writer: UpperCAmelCase_ : Union[str, Any] = self.to_dict() UpperCAmelCase_ : Optional[Any] = json.dumps(_UpperCamelCase , indent=2 , sort_keys=_UpperCamelCase ) + '\n' writer.write(_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Dict[str, Any]: UpperCAmelCase_ : Optional[Any] = copy.deepcopy(self.__dict__ ) UpperCAmelCase_ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1] return output def __repr__( self ) -> Optional[Any]: return f"{self.__class__.__name__} {self.to_json_string()}" def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> str: if use_diff is True: UpperCAmelCase_ : Tuple = self.to_diff_dict() else: UpperCAmelCase_ : Dict = self.to_dict() return json.dumps(_UpperCamelCase , indent=2 , sort_keys=_UpperCamelCase ) + "\n" def __UpperCAmelCase ( self ) -> Dict[str, Any]: UpperCAmelCase_ : str = self.to_dict() # get the default config dict UpperCAmelCase_ : Optional[Any] = BitsAndBytesConfig().to_dict() UpperCAmelCase_ : Optional[int] = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: UpperCAmelCase_ : List[str] = value return serializable_config_dict
code_codestyle: 29

style_context:
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase = { 'configuration_time_series_transformer': [ 'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimeSeriesTransformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TimeSeriesTransformerForPrediction', 'TimeSeriesTransformerModel', 'TimeSeriesTransformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
style_context_codestyle: 29
label: 1

Row 7
code:
import math from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json', # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Optional[Any] = '''data2vec-audio''' def __init__( self , _UpperCamelCase=3_2 , _UpperCamelCase=7_6_8 , _UpperCamelCase=1_2 , _UpperCamelCase=1_2 , _UpperCamelCase=3_0_7_2 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.0 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.02 , _UpperCamelCase=1E-5 , _UpperCamelCase="gelu" , _UpperCamelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _UpperCamelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCamelCase=(1_0, 3, 3, 3, 3, 2, 2) , _UpperCamelCase=False , _UpperCamelCase=1_6 , _UpperCamelCase=1_9 , _UpperCamelCase=5 , _UpperCamelCase=0.05 , _UpperCamelCase=1_0 , _UpperCamelCase=2 , _UpperCamelCase=0.0 , _UpperCamelCase=1_0 , _UpperCamelCase=0 , _UpperCamelCase="sum" , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=2_5_6 , _UpperCamelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , _UpperCamelCase=(5, 3, 3, 1, 1) , _UpperCamelCase=(1, 2, 3, 1, 1) , _UpperCamelCase=5_1_2 , _UpperCamelCase=0 , _UpperCamelCase=1 , _UpperCamelCase=2 , _UpperCamelCase=False , _UpperCamelCase=3 , _UpperCamelCase=2 , _UpperCamelCase=3 , _UpperCamelCase=None , **_UpperCamelCase , ) -> Any: super().__init__(**_UpperCamelCase , pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase ) UpperCAmelCase_ : Optional[int] = hidden_size UpperCAmelCase_ : Optional[int] = feat_extract_activation UpperCAmelCase_ : Optional[Any] = list(_UpperCamelCase ) UpperCAmelCase_ : int = list(_UpperCamelCase ) UpperCAmelCase_ : Optional[int] = list(_UpperCamelCase ) UpperCAmelCase_ : List[Any] = conv_bias UpperCAmelCase_ : int = num_conv_pos_embeddings UpperCAmelCase_ : int = num_conv_pos_embedding_groups UpperCAmelCase_ : List[str] = conv_pos_kernel_size UpperCAmelCase_ : List[Any] = len(self.conv_dim ) UpperCAmelCase_ : Union[str, Any] = num_hidden_layers UpperCAmelCase_ : Any = intermediate_size UpperCAmelCase_ : List[Any] = hidden_act UpperCAmelCase_ : int = num_attention_heads UpperCAmelCase_ : Optional[Any] = hidden_dropout UpperCAmelCase_ : Any = attention_dropout UpperCAmelCase_ : Dict = activation_dropout UpperCAmelCase_ : Optional[Any] = feat_proj_dropout UpperCAmelCase_ : Any = final_dropout UpperCAmelCase_ : Optional[Any] = layerdrop UpperCAmelCase_ : Dict = layer_norm_eps UpperCAmelCase_ : List[str] = initializer_range UpperCAmelCase_ : Union[str, Any] = vocab_size UpperCAmelCase_ : List[Any] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`," f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase_ : Tuple = mask_time_prob UpperCAmelCase_ : int = mask_time_length UpperCAmelCase_ : Tuple = mask_time_min_masks UpperCAmelCase_ : Dict = mask_feature_prob UpperCAmelCase_ : List[str] = mask_feature_length UpperCAmelCase_ : Optional[Any] = mask_feature_min_masks # ctc loss UpperCAmelCase_ : Dict = ctc_loss_reduction UpperCAmelCase_ : Any = ctc_zero_infinity # adapter UpperCAmelCase_ : List[Any] = add_adapter UpperCAmelCase_ : Tuple = adapter_kernel_size UpperCAmelCase_ : Tuple = adapter_stride UpperCAmelCase_ : Any = num_adapter_layers UpperCAmelCase_ : List[str] = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. UpperCAmelCase_ : int = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. UpperCAmelCase_ : Dict = list(_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = list(_UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = list(_UpperCamelCase ) UpperCAmelCase_ : int = xvector_output_dim @property def __UpperCAmelCase ( self ) -> str: return math.prod(self.conv_stride )
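The final property in this `code` field computes the feature extractor's total downsampling as the product of the conv strides (in the real class this property is `inputs_to_logits_ratio`). With the defaults above:

```python
import math
from transformers import Data2VecAudioConfig

cfg = Data2VecAudioConfig()
print(cfg.conv_stride)             # [5, 2, 2, 2, 2, 2, 2]
print(math.prod(cfg.conv_stride))  # 320 audio samples per output frame
```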
code_codestyle: 29

style_context:
import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'tensor(bool)': np.bool_, 'tensor(int8)': np.inta, 'tensor(uint8)': np.uinta, 'tensor(int16)': np.intaa, 'tensor(uint16)': np.uintaa, 'tensor(int32)': np.intaa, 'tensor(uint32)': np.uintaa, 'tensor(int64)': np.intaa, 'tensor(uint64)': np.uintaa, 'tensor(float16)': np.floataa, 'tensor(float)': np.floataa, 'tensor(double)': np.floataa, } class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase=None , **_UpperCamelCase ) -> Dict: logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' ) UpperCAmelCase_ : Any = model UpperCAmelCase_ : int = kwargs.get('model_save_dir' , _UpperCamelCase ) UpperCAmelCase_ : List[Any] = kwargs.get('latest_model_name' , _UpperCamelCase ) def __call__( self , **_UpperCamelCase ) -> str: UpperCAmelCase_ : Optional[int] = {k: np.array(_UpperCamelCase ) for k, v in kwargs.items()} return self.model.run(_UpperCamelCase , _UpperCamelCase ) @staticmethod def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ) -> List[Any]: if provider is None: logger.info('No onnxruntime provider specified, using CPUExecutionProvider' ) UpperCAmelCase_ : List[str] = 'CPUExecutionProvider' return ort.InferenceSession(_UpperCamelCase , providers=[provider] , sess_options=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> Dict: UpperCAmelCase_ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(self.latest_model_name ) UpperCAmelCase_ : str = Path(_UpperCamelCase ).joinpath(_UpperCamelCase ) try: shutil.copyfile(_UpperCamelCase , _UpperCamelCase ) except shutil.SameFileError: pass # copy external weights (for models >2GB) UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(_UpperCamelCase ) if src_path.exists(): UpperCAmelCase_ : List[Any] = Path(_UpperCamelCase ).joinpath(_UpperCamelCase ) try: shutil.copyfile(_UpperCamelCase , _UpperCamelCase ) except shutil.SameFileError: pass def __UpperCAmelCase ( self , _UpperCamelCase , **_UpperCamelCase , ) -> List[str]: if os.path.isfile(_UpperCamelCase ): logger.error(f"Provided path ({save_directory}) should be a directory, not a file" ) return os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase ) # saving model weights/files self._save_pretrained(_UpperCamelCase , **_UpperCamelCase ) @classmethod def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> List[str]: UpperCAmelCase_ : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(_UpperCamelCase ): UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model( os.path.join(_UpperCamelCase , _UpperCamelCase ) , provider=_UpperCamelCase , sess_options=_UpperCamelCase ) UpperCAmelCase_ : Tuple = Path(_UpperCamelCase ) # load model from hub else: # download model UpperCAmelCase_ : List[str] = hf_hub_download( repo_id=_UpperCamelCase , 
filename=_UpperCamelCase , use_auth_token=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , ) UpperCAmelCase_ : Union[str, Any] = Path(_UpperCamelCase ).parent UpperCAmelCase_ : List[str] = Path(_UpperCamelCase ).name UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(_UpperCamelCase , provider=_UpperCamelCase , sess_options=_UpperCamelCase ) return cls(model=_UpperCamelCase , **_UpperCamelCase ) @classmethod def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> Optional[int]: UpperCAmelCase_ : List[str] = None if len(str(_UpperCamelCase ).split('@' ) ) == 2: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model_id.split('@' ) return cls._from_pretrained( model_id=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , use_auth_token=_UpperCamelCase , **_UpperCamelCase , )
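The wrapper in this `style_context` ultimately reduces to an `onnxruntime.InferenceSession`. A minimal sketch of the core call, assuming a local `model.onnx` whose single input is named "input" (both the path and the input name are assumptions):

```python
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
dummy = np.zeros((1, 3, 224, 224), dtype=np.float32)  # shape depends on the model
outputs = sess.run(None, {"input": dummy})  # None -> return all model outputs
print([o.shape for o in outputs])
```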
style_context_codestyle: 29
label: 1

Row 8
code:
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} __UpperCAmelCase = { 'vocab_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json' ), }, 'merges_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt' ), }, } __UpperCAmelCase = { 'allenai/longformer-base-4096': 4096, 'allenai/longformer-large-4096': 4096, 'allenai/longformer-large-4096-finetuned-triviaqa': 4096, 'allenai/longformer-base-4096-extra.pos.embd.only': 4096, 'allenai/longformer-large-4096-extra.pos.embd.only': 4096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowercase__ ( ): '''simple docstring''' UpperCAmelCase_ : str = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) UpperCAmelCase_ : Any = bs[:] UpperCAmelCase_ : Tuple = 0 for b in range(2**8 ): if b not in bs: bs.append(__snake_case ) cs.append(2**8 + n ) n += 1 UpperCAmelCase_ : Optional[Any] = [chr(__snake_case ) for n in cs] return dict(zip(__snake_case , __snake_case ) ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' UpperCAmelCase_ : List[str] = set() UpperCAmelCase_ : List[str] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase_ : Optional[Any] = char return pairs class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Optional[Any] = VOCAB_FILES_NAMES _snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : str = ['''input_ids''', '''attention_mask'''] def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="replace" , _UpperCamelCase="<s>" , _UpperCamelCase="</s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase="<mask>" , _UpperCamelCase=False , **_UpperCamelCase , ) -> Optional[int]: UpperCAmelCase_ : Tuple = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else bos_token UpperCAmelCase_ : List[str] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else eos_token UpperCAmelCase_ : Optional[int] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else sep_token UpperCAmelCase_ : int = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else cls_token UpperCAmelCase_ : Dict = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else unk_token UpperCAmelCase_ : Optional[int] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it UpperCAmelCase_ : Dict = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token super().__init__( errors=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , **_UpperCamelCase , ) with open(_UpperCamelCase , encoding='utf-8' ) as vocab_handle: UpperCAmelCase_ : Any = json.load(_UpperCamelCase ) UpperCAmelCase_ : Any = {v: k for k, v in self.encoder.items()} UpperCAmelCase_ : List[Any] = errors # how to handle errors in decoding UpperCAmelCase_ : Any = bytes_to_unicode() UpperCAmelCase_ : List[str] = {v: k for k, v in self.byte_encoder.items()} with open(_UpperCamelCase , encoding='utf-8' ) as merges_handle: UpperCAmelCase_ : int = merges_handle.read().split('\n' )[1:-1] UpperCAmelCase_ : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges] UpperCAmelCase_ : List[str] = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) ) UpperCAmelCase_ : int = {} UpperCAmelCase_ : Union[str, Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCAmelCase_ : str = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def __UpperCAmelCase ( self ) -> Optional[int]: return len(self.encoder ) def __UpperCAmelCase ( self ) -> Optional[int]: return dict(self.encoder , **self.added_tokens_encoder ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any: if token in self.cache: return self.cache[token] UpperCAmelCase_ : List[Any] = tuple(_UpperCamelCase ) UpperCAmelCase_ : Optional[int] = get_pairs(_UpperCamelCase ) if not pairs: return token while True: UpperCAmelCase_ : List[str] = min(_UpperCamelCase , key=lambda _UpperCamelCase : self.bpe_ranks.get(_UpperCamelCase , float('inf' ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = bigram UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : Dict = 0 while i < len(_UpperCamelCase ): try: UpperCAmelCase_ : int = word.index(_UpperCamelCase , _UpperCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase_ : int = j if word[i] == first and i < len(_UpperCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase_ : Tuple = tuple(_UpperCamelCase ) UpperCAmelCase_ : Dict = new_word if len(_UpperCamelCase ) == 1: break else: UpperCAmelCase_ : Tuple = get_pairs(_UpperCamelCase ) UpperCAmelCase_ : List[Any] = ' '.join(_UpperCamelCase ) UpperCAmelCase_ : List[str] = word return word def __UpperCAmelCase ( self , _UpperCamelCase ) -> Union[str, Any]: UpperCAmelCase_ : List[str] = [] for token in re.findall(self.pat , _UpperCamelCase ): UpperCAmelCase_ : Dict = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCamelCase ).split(' ' ) ) return bpe_tokens def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[str]: return self.encoder.get(_UpperCamelCase , self.encoder.get(self.unk_token ) ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any: return 
self.decoder.get(_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any: UpperCAmelCase_ : Optional[Any] = ''.join(_UpperCamelCase ) UpperCAmelCase_ : str = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]: if not os.path.isdir(_UpperCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return UpperCAmelCase_ : Tuple = os.path.join( _UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) UpperCAmelCase_ : Dict = os.path.join( _UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase ) + '\n' ) UpperCAmelCase_ : Union[str, Any] = 0 with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCamelCase : _UpperCamelCase[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." ' Please check that the tokenizer is not corrupted!' ) UpperCAmelCase_ : Union[str, Any] = token_index writer.write(' '.join(bpe_tokens ) + '\n' ) index += 1 return vocab_file, merge_file def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase_ : List[Any] = [self.cls_token_id] UpperCAmelCase_ : Dict = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCamelCase )) + [1] return [1] + ([0] * len(_UpperCamelCase )) + [1, 1] + ([0] * len(_UpperCamelCase )) + [1] def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]: UpperCAmelCase_ : Dict = [self.sep_token_id] UpperCAmelCase_ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , **_UpperCamelCase ) -> Optional[Any]: UpperCAmelCase_ : Optional[int] = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_UpperCamelCase ) > 0 and not text[0].isspace()): UpperCAmelCase_ : Optional[Any] = ' ' + text return (text, kwargs)
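# Editor's note: a minimal usage sketch for the byte-level BPE helpers above. It
# assumes the two module-level functions keep their upstream names
# `bytes_to_unicode` and `get_pairs` (the names used at their call sites above);
# the sample word is illustrative.
# byte_encoder = bytes_to_unicode()        # every byte 0-255 mapped to a printable unicode char
# assert len(byte_encoder) == 2**8
# get_pairs(("h", "e", "l", "l", "o"))     # -> {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')},
#                                          # the adjacent-symbol pairs ranked by the bpe() merge loop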
29
import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope='session' ) def lowercase__ ( ): '''simple docstring''' UpperCAmelCase_ : Tuple = 10 UpperCAmelCase_ : Tuple = datasets.Features( { 'tokens': datasets.Sequence(datasets.Value('string' ) ), 'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ), 'answers': datasets.Sequence( { 'text': datasets.Value('string' ), 'answer_start': datasets.Value('int32' ), } ), 'id': datasets.Value('int64' ), } ) UpperCAmelCase_ : Tuple = datasets.Dataset.from_dict( { 'tokens': [['foo'] * 5] * n, 'labels': [[1] * 5] * n, 'answers': [{'answer_start': [97], 'text': ['1976']}] * 10, 'id': list(range(__snake_case ) ), } , features=__snake_case , ) return dataset @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[str] ): '''simple docstring''' UpperCAmelCase_ : str = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' ) dataset.map(cache_file_name=__snake_case ) return filename # FILE_CONTENT + files __UpperCAmelCase = '\\n Text data.\n Second line of data.' @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt' UpperCAmelCase_ : Tuple = FILE_CONTENT with open(__snake_case , 'w' ) as f: f.write(__snake_case ) return filename @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[str] ): '''simple docstring''' import bza UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2' UpperCAmelCase_ : str = bytes(__snake_case , 'utf-8' ) with bza.open(__snake_case , 'wb' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Any ): '''simple docstring''' import gzip UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' ) UpperCAmelCase_ : Dict = bytes(__snake_case , 'utf-8' ) with gzip.open(__snake_case , 'wb' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Dict ): '''simple docstring''' if datasets.config.LZ4_AVAILABLE: import lza.frame UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4' UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' ) with lza.frame.open(__snake_case , 'wb' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Tuple , __snake_case : List[Any] ): '''simple docstring''' if datasets.config.PY7ZR_AVAILABLE: import pyazr UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z' with pyazr.SevenZipFile(__snake_case , 'w' ) as archive: archive.write(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any] ): '''simple docstring''' import tarfile UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.tar' with tarfile.TarFile(__snake_case , 'w' ) as f: f.add(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str ): '''simple docstring''' import lzma UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz' UpperCAmelCase_ : Any = 
bytes(__snake_case , 'utf-8' ) with lzma.open(__snake_case , 'wb' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ): '''simple docstring''' import zipfile UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] ): '''simple docstring''' if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'file.txt.zst' UpperCAmelCase_ : List[str] = bytes(__snake_case , 'utf-8' ) with zstd.open(__snake_case , 'wb' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Any ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml' UpperCAmelCase_ : List[Any] = textwrap.dedent( '\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' ) with open(__snake_case , 'w' ) as f: f.write(__snake_case ) return filename __UpperCAmelCase = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __UpperCAmelCase = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __UpperCAmelCase = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __UpperCAmelCase = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __UpperCAmelCase = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope='session' ) def lowercase__ ( ): '''simple docstring''' return DATA_DICT_OF_LISTS @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : int ): '''simple docstring''' UpperCAmelCase_ : List[Any] = datasets.Dataset.from_dict(__snake_case ) UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' ) dataset.map(cache_file_name=__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' ) with contextlib.closing(sqlitea.connect(__snake_case ) ) as con: UpperCAmelCase_ : List[Any] = con.cursor() cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' ) for item in DATA: cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope='session' ) def lowercase__ ( 
__snake_case : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' ) with open(__snake_case , 'w' , newline='' ) as f: UpperCAmelCase_ : Tuple = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] ) writer.writeheader() for item in DATA: writer.writerow(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' ) with open(__snake_case , 'w' , newline='' ) as f: UpperCAmelCase_ : Optional[Any] = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] ) writer.writeheader() for item in DATA: writer.writerow(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str , __snake_case : Any ): '''simple docstring''' import bza UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2' with open(__snake_case , 'rb' ) as f: UpperCAmelCase_ : int = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(__snake_case , 'wb' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple ): '''simple docstring''' UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) ) f.write(__snake_case , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Tuple , __snake_case : int , __snake_case : str ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) ) f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' ) UpperCAmelCase_ : Dict = pa.schema( { 'col_1': pa.string(), 'col_2': pa.intaa(), 'col_3': pa.floataa(), } ) with open(__snake_case , 'wb' ) as f: UpperCAmelCase_ : List[Any] = pq.ParquetWriter(__snake_case , schema=__snake_case ) UpperCAmelCase_ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__snake_case ) )] for k in DATA[0]} , schema=__snake_case ) writer.write_table(__snake_case ) writer.close() return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' ) UpperCAmelCase_ : Optional[int] = {'data': DATA} with open(__snake_case , 'w' ) as f: json.dump(__snake_case , __snake_case ) return path 
@pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' ) UpperCAmelCase_ : Tuple = {'data': DATA_DICT_OF_LISTS} with open(__snake_case , 'w' ) as f: json.dump(__snake_case , __snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' ) with open(__snake_case , 'w' ) as f: for item in DATA: f.write(json.dumps(__snake_case ) + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' ) with open(__snake_case , 'w' ) as f: for item in DATA: f.write(json.dumps(__snake_case ) + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : int ): '''simple docstring''' UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' ) with open(__snake_case , 'w' ) as f: for item in DATA_312: f.write(json.dumps(__snake_case ) + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' ) with open(__snake_case , 'w' ) as f: for item in DATA_STR: f.write(json.dumps(__snake_case ) + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Dict , __snake_case : Dict ): '''simple docstring''' import gzip UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' ) with open(__snake_case , 'rb' ) as orig_file: with gzip.open(__snake_case , 'wb' ) as zipped_file: zipped_file.writelines(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : int , __snake_case : Any ): '''simple docstring''' import gzip UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' ) with open(__snake_case , 'rb' ) as orig_file: with gzip.open(__snake_case , 'wb' ) as zipped_file: zipped_file.writelines(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ): '''simple docstring''' UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) ) f.write(__snake_case , 
arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar' with tarfile.TarFile(__snake_case , 'w' ) as f: f.add(__snake_case , arcname=os.path.basename(__snake_case ) ) f.add(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar' with tarfile.TarFile(__snake_case , 'w' ) as f: f.add(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : Any = ['0', '1', '2', '3'] UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' ) with open(__snake_case , 'w' ) as f: for item in data: f.write(item + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3'] UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' ) with open(__snake_case , 'w' ) as f: for item in data: f.write(item + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' UpperCAmelCase_ : Dict = ['0', '1', '2', '3'] UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc' with open(__snake_case , 'w' ) as f: for item in data: f.write(item + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Dict , __snake_case : str , __snake_case : Any ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) ) f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename('unsupported.ext' ) ) f.write(__snake_case , arcname=os.path.basename('unsupported_2.ext' ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Dict ): '''simple docstring''' UpperCAmelCase_ : Tuple = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] ) UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' ) with 
open(__snake_case , 'w' , encoding='utf-8' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( ): '''simple docstring''' return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' ) @pytest.fixture(scope='session' ) def lowercase__ ( ): '''simple docstring''' return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' ) @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str , __snake_case : List[str] ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) f.write(__snake_case , arcname=os.path.basename(__snake_case ).replace('.jpg' , '2.jpg' ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Any ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data_dir' ) (data_dir / "subdir").mkdir() with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f: f.write('foo\n' * 10 ) with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f: f.write('bar\n' * 10 ) # hidden file with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f: f.write('bar\n' * 10 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f: f.write('foo\n' * 10 ) with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f: f.write('bar\n' * 10 ) return data_dir
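# Editor's note: a sketch of how these session-scoped fixtures are consumed from a
# test module (hypothetical test; assumes the csv fixture keeps its upstream name
# `csv_path` and that the `datasets` library is installed):
# def test_load_csv(csv_path):
#     ds = datasets.load_dataset("csv", data_files=csv_path)
#     assert ds["train"].num_rows == 4  # the DATA constant above has four rows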
29
1
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCamelCase (_snake_case , _snake_case ): '''simple docstring''' @register_to_config def __init__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None ) -> Optional[int]: super().__init__() UpperCAmelCase_ : str = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" UpperCAmelCase_ : List[str] = torch.zeros(_UpperCamelCase , _UpperCamelCase ) else: UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : int = torch.nn.Parameter(_UpperCamelCase ) class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : VQModel _snake_case : CLIPTextModel _snake_case : CLIPTokenizer _snake_case : TransformeraDModel _snake_case : LearnedClassifierFreeSamplingEmbeddings _snake_case : VQDiffusionScheduler def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Optional[int]: super().__init__() self.register_modules( vqvae=_UpperCamelCase , transformer=_UpperCamelCase , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase , scheduler=_UpperCamelCase , learned_classifier_free_sampling_embeddings=_UpperCamelCase , ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]: UpperCAmelCase_ : Tuple = len(_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else 1 # get prompt text embeddings UpperCAmelCase_ : Optional[Any] = self.tokenizer( _UpperCamelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) UpperCAmelCase_ : Optional[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase_ : List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) UpperCAmelCase_ : Tuple = text_input_ids[:, : self.tokenizer.model_max_length] UpperCAmelCase_ : str = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 UpperCAmelCase_ : Any = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_UpperCamelCase ) # duplicate text embeddings for each generation per prompt UpperCAmelCase_ : Dict = prompt_embeds.repeat_interleave(_UpperCamelCase , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: UpperCAmelCase_ : Optional[Any] = self.learned_classifier_free_sampling_embeddings.embeddings UpperCAmelCase_ : List[str] = negative_prompt_embeds.unsqueeze(0 ).repeat(_UpperCamelCase , 1 , 1 ) else: UpperCAmelCase_ : Tuple = [''] * batch_size UpperCAmelCase_ : Union[str, Any] = text_input_ids.shape[-1] UpperCAmelCase_ : List[Any] = self.tokenizer( _UpperCamelCase , padding='max_length' , max_length=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors='pt' , ) UpperCAmelCase_ : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings UpperCAmelCase_ : List[Any] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_UpperCamelCase ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase_ : Dict = negative_prompt_embeds.shape[1] UpperCAmelCase_ : Optional[Any] = negative_prompt_embeds.repeat(1 , _UpperCamelCase , 1 ) UpperCAmelCase_ : Union[str, Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _UpperCamelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase_ : str = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self , _UpperCamelCase , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 5.0 , _UpperCamelCase = 1.0 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = 1 , ) -> Union[ImagePipelineOutput, Tuple]: if isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : Any = 1 elif isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : Optional[int] = len(_UpperCamelCase ) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_UpperCamelCase )}" ) UpperCAmelCase_ : List[str] = batch_size * num_images_per_prompt UpperCAmelCase_ : int = guidance_scale > 1.0 UpperCAmelCase_ : Tuple = self._encode_prompt(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_UpperCamelCase , _UpperCamelCase ) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(_UpperCamelCase )}." 
) # get the initial completely masked latents unless the user supplied it UpperCAmelCase_ : Tuple = (batch_size, self.transformer.num_latent_pixels) if latents is None: UpperCAmelCase_ : Optional[Any] = self.transformer.num_vector_embeds - 1 UpperCAmelCase_ : Any = torch.full(_UpperCamelCase , _UpperCamelCase ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( 'Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,' f" {self.transformer.num_vector_embeds - 1} (inclusive)." ) UpperCAmelCase_ : Dict = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(_UpperCamelCase , device=self.device ) UpperCAmelCase_ : List[Any] = self.scheduler.timesteps.to(self.device ) UpperCAmelCase_ : Optional[int] = latents for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ): # expand the sample if we are doing classifier free guidance UpperCAmelCase_ : Any = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` UpperCAmelCase_ : Optional[int] = self.transformer(_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , timestep=_UpperCamelCase ).sample if do_classifier_free_guidance: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_output.chunk(2 ) UpperCAmelCase_ : Optional[int] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(_UpperCamelCase , dim=1 , keepdim=_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = self.truncate(_UpperCamelCase , _UpperCamelCase ) # remove `log(0)`'s (`-inf`s) UpperCAmelCase_ : Optional[Any] = model_output.clamp(-7_0 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ : Optional[Any] = self.scheduler.step(_UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , generator=_UpperCamelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : List[str] = self.vqvae.config.vq_embed_dim UpperCAmelCase_ : Union[str, Any] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) UpperCAmelCase_ : List[Any] = self.vqvae.quantize.get_codebook_entry(_UpperCamelCase , shape=_UpperCamelCase ) UpperCAmelCase_ : Dict = self.vqvae.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase ).sample UpperCAmelCase_ : str = (image / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase_ : List[str] = self.numpy_to_pil(_UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> torch.FloatTensor: UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = torch.sort(_UpperCamelCase , 1 , descending=_UpperCamelCase ) UpperCAmelCase_ : Tuple = torch.exp(_UpperCamelCase ) UpperCAmelCase_ : Any = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out UpperCAmelCase_ : str = torch.full_like(keep_mask[:, 0:1, :] , _UpperCamelCase ) UpperCAmelCase_ : Optional[int] = torch.cat((all_true, keep_mask) , dim=1 ) UpperCAmelCase_ : Tuple = keep_mask[:, :-1, :] UpperCAmelCase_ : List[str] = 
keep_mask.gather(1 , indices.argsort(1 ) ) UpperCAmelCase_ : str = log_p_x_0.clone() UpperCAmelCase_ : Optional[int] = -torch.inf # -inf = log(0) return rv
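# Editor's note: a hypothetical end-to-end sketch for the pipeline above, assuming
# its upstream class name `VQDiffusionPipeline` and the public
# "microsoft/vq-diffusion-ithq" checkpoint:
# from diffusers import VQDiffusionPipeline
# pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
# image = pipe("teddy bear playing in the pool", num_inference_steps=50).images[0]
# image.save("teddy_bear.png")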
29
from __future__ import annotations def lowercase__ ( __snake_case : tuple[int, int] , __snake_case : int ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ : Tuple = position UpperCAmelCase_ : str = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] UpperCAmelCase_ : Optional[Any] = [] for position in positions: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(__snake_case ) return permissible_positions def lowercase__ ( __snake_case : list[list[int]] ): '''simple docstring''' return not any(elem == 0 for row in board for elem in row ) def lowercase__ ( __snake_case : list[list[int]] , __snake_case : tuple[int, int] , __snake_case : int ): '''simple docstring''' if is_complete(__snake_case ): return True for position in get_valid_pos(__snake_case , len(__snake_case ) ): UpperCAmelCase_ , UpperCAmelCase_ : Any = position if board[y][x] == 0: UpperCAmelCase_ : Optional[Any] = curr + 1 if open_knight_tour_helper(__snake_case , __snake_case , curr + 1 ): return True UpperCAmelCase_ : List[Any] = 0 return False def lowercase__ ( __snake_case : int ): '''simple docstring''' UpperCAmelCase_ : str = [[0 for i in range(__snake_case )] for j in range(__snake_case )] for i in range(__snake_case ): for j in range(__snake_case ): UpperCAmelCase_ : Optional[Any] = 1 if open_knight_tour_helper(__snake_case , (i, j) , 1 ): return board UpperCAmelCase_ : List[Any] = 0 UpperCAmelCase_ : List[str] = F"Open Knight Tour cannot be performed on a board of size {n}" raise ValueError(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
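# Editor's note: usage sketch, assuming the last function above keeps its upstream
# name `open_knight_tour`. A 5x5 board admits an open knight's tour, so this prints
# a grid whose entries give the visit order 1..25:
# for row in open_knight_tour(5):
#     print(row)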
29
1
import json import os import re import sys import urllib.request import requests from bsa import BeautifulSoup __UpperCAmelCase = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582' } def lowercase__ ( __snake_case : str = "dhaka" , __snake_case : int = 5 ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = min(__snake_case , 50 ) # Prevent abuse! UpperCAmelCase_ : Dict = { 'q': query, 'tbm': 'isch', 'hl': 'en', 'ijn': '0', } UpperCAmelCase_ : Any = requests.get('https://www.google.com/search' , params=__snake_case , headers=__snake_case ) UpperCAmelCase_ : Dict = BeautifulSoup(html.text , 'html.parser' ) UpperCAmelCase_ : Any = ''.join( re.findall(R'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) ) UpperCAmelCase_ : int = json.dumps(__snake_case ) UpperCAmelCase_ : List[Any] = json.loads(__snake_case ) UpperCAmelCase_ : Union[str, Any] = re.findall( R'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , __snake_case , ) if not matched_google_image_data: return 0 UpperCAmelCase_ : Union[str, Any] = re.sub( R'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(__snake_case ) , ) UpperCAmelCase_ : Optional[int] = re.findall( R'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , __snake_case , ) for index, fixed_full_res_image in enumerate(__snake_case ): if index >= max_images: return index UpperCAmelCase_ : Optional[int] = bytes(__snake_case , 'ascii' ).decode( 'unicode-escape' ) UpperCAmelCase_ : Union[str, Any] = bytes(__snake_case , 'ascii' ).decode( 'unicode-escape' ) UpperCAmelCase_ : Union[str, Any] = urllib.request.build_opener() UpperCAmelCase_ : Dict = [ ( 'User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582', ) ] urllib.request.install_opener(__snake_case ) UpperCAmelCase_ : Union[str, Any] = F"query_{query.replace(' ' , '_' )}" if not os.path.exists(__snake_case ): os.makedirs(__snake_case ) urllib.request.urlretrieve( # noqa: S310 __snake_case , F"{path_name}/original_size_img_{index}.jpg" ) return index if __name__ == "__main__": try: __UpperCAmelCase = download_images_from_google_query(sys.argv[1]) print(F'{image_count} images were downloaded to disk.') except IndexError: print('Please provide a search term.') raise
29
def lowercase__ ( __snake_case : int ): '''simple docstring''' UpperCAmelCase_ : list[list[int]] = [[0 for _ in range(__snake_case )] for _ in range(m + 1 )] for i in range(m + 1 ): UpperCAmelCase_ : Optional[Any] = 1 for n in range(m + 1 ): for k in range(1 , __snake_case ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: __UpperCAmelCase = int(input('Enter a number: ').strip()) print(partition(n)) except ValueError: print('Please enter a number.') else: try: __UpperCAmelCase = int(sys.argv[1]) print(partition(n)) except ValueError: print('Please pass a number.')
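# Editor's note: a worked example, assuming the function above keeps its upstream
# names (`partition`, with parameter `m`). It computes p(m), the number of integer
# partitions of m; for m = 5 the seven partitions are 5, 4+1, 3+2, 3+1+1, 2+2+1,
# 2+1+1+1 and 1+1+1+1+1, so:
# partition(5) == 7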
29
1
import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'tensor(bool)': np.bool_, 'tensor(int8)': np.inta, 'tensor(uint8)': np.uinta, 'tensor(int16)': np.intaa, 'tensor(uint16)': np.uintaa, 'tensor(int32)': np.intaa, 'tensor(uint32)': np.uintaa, 'tensor(int64)': np.intaa, 'tensor(uint64)': np.uintaa, 'tensor(float16)': np.floataa, 'tensor(float)': np.floataa, 'tensor(double)': np.floataa, } class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase=None , **_UpperCamelCase ) -> Dict: logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' ) UpperCAmelCase_ : Any = model UpperCAmelCase_ : int = kwargs.get('model_save_dir' , _UpperCamelCase ) UpperCAmelCase_ : List[Any] = kwargs.get('latest_model_name' , _UpperCamelCase ) def __call__( self , **_UpperCamelCase ) -> str: UpperCAmelCase_ : Optional[int] = {k: np.array(_UpperCamelCase ) for k, v in kwargs.items()} return self.model.run(_UpperCamelCase , _UpperCamelCase ) @staticmethod def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ) -> List[Any]: if provider is None: logger.info('No onnxruntime provider specified, using CPUExecutionProvider' ) UpperCAmelCase_ : List[str] = 'CPUExecutionProvider' return ort.InferenceSession(_UpperCamelCase , providers=[provider] , sess_options=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> Dict: UpperCAmelCase_ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(self.latest_model_name ) UpperCAmelCase_ : str = Path(_UpperCamelCase ).joinpath(_UpperCamelCase ) try: shutil.copyfile(_UpperCamelCase , _UpperCamelCase ) except shutil.SameFileError: pass # copy external weights (for models >2GB) UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(_UpperCamelCase ) if src_path.exists(): UpperCAmelCase_ : List[Any] = Path(_UpperCamelCase ).joinpath(_UpperCamelCase ) try: shutil.copyfile(_UpperCamelCase , _UpperCamelCase ) except shutil.SameFileError: pass def __UpperCAmelCase ( self , _UpperCamelCase , **_UpperCamelCase , ) -> List[str]: if os.path.isfile(_UpperCamelCase ): logger.error(f"Provided path ({save_directory}) should be a directory, not a file" ) return os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase ) # saving model weights/files self._save_pretrained(_UpperCamelCase , **_UpperCamelCase ) @classmethod def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> List[str]: UpperCAmelCase_ : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(_UpperCamelCase ): UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model( os.path.join(_UpperCamelCase , _UpperCamelCase ) , provider=_UpperCamelCase , sess_options=_UpperCamelCase ) UpperCAmelCase_ : Tuple = Path(_UpperCamelCase ) # load model from hub else: # download model UpperCAmelCase_ : List[str] = hf_hub_download( repo_id=_UpperCamelCase , 
filename=_UpperCamelCase , use_auth_token=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , ) UpperCAmelCase_ : Union[str, Any] = Path(_UpperCamelCase ).parent UpperCAmelCase_ : List[str] = Path(_UpperCamelCase ).name UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(_UpperCamelCase , provider=_UpperCamelCase , sess_options=_UpperCamelCase ) return cls(model=_UpperCamelCase , **_UpperCamelCase ) @classmethod def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> Optional[int]: UpperCAmelCase_ : List[str] = None if len(str(_UpperCamelCase ).split('@' ) ) == 2: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model_id.split('@' ) return cls._from_pretrained( model_id=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , use_auth_token=_UpperCamelCase , **_UpperCamelCase , )
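# Editor's note: a hypothetical usage sketch for the wrapper above, assuming the
# upstream method names `from_pretrained`/`save_pretrained` and a repo or local
# directory containing a `model.onnx` file (the repo id below is made up):
# import numpy as np
# model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", provider="CPUExecutionProvider")
# outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))
# model.save_pretrained("./exported-onnx")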
29
from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING __UpperCAmelCase = logging.get_logger(__name__) @add_end_docstrings(_snake_case ) class lowerCamelCase (_snake_case ): '''simple docstring''' def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int: super().__init__(*_UpperCamelCase , **_UpperCamelCase ) self.check_model_type(_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = {}, {} if padding is not None: UpperCAmelCase_ : List[str] = padding if truncation is not None: UpperCAmelCase_ : Tuple = truncation if top_k is not None: UpperCAmelCase_ : Dict = top_k return preprocess_params, {}, postprocess_params def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> int: if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : Optional[Any] = {'image': image, 'question': question} else: UpperCAmelCase_ : List[str] = image UpperCAmelCase_ : Optional[Any] = super().__call__(_UpperCamelCase , **_UpperCamelCase ) return results def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = load_image(inputs['image'] ) UpperCAmelCase_ : Dict = self.tokenizer( inputs['question'] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase ) UpperCAmelCase_ : int = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework ) model_inputs.update(_UpperCamelCase ) return model_inputs def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]: UpperCAmelCase_ : Any = self.model(**_UpperCamelCase ) return model_outputs def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> str: if top_k > self.model.config.num_labels: UpperCAmelCase_ : Union[str, Any] = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase_ : List[str] = model_outputs.logits.sigmoid()[0] UpperCAmelCase_ , UpperCAmelCase_ : str = probs.topk(_UpperCamelCase ) else: raise ValueError(f"Unsupported framework: {self.framework}" ) UpperCAmelCase_ : Optional[Any] = scores.tolist() UpperCAmelCase_ : Tuple = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
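# Editor's note: usage sketch via the high-level `pipeline` factory (a real
# transformers entry point; the image path and question are placeholders):
# from transformers import pipeline
# vqa = pipeline("visual-question-answering")
# vqa(image="photo.jpg", question="What animal is in the picture?", top_k=3)
# -> [{"score": ..., "answer": ...}, ...], as built by the postprocessing step above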
29
1
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of the same set. def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : Any = [False] * len(__snake_case ) UpperCAmelCase_ : Dict = [-1] * len(__snake_case ) def dfs(__snake_case : Dict , __snake_case : Tuple ): UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Dict = c for u in graph[v]: if not visited[u]: dfs(__snake_case , 1 - c ) for i in range(len(__snake_case ) ): if not visited[i]: dfs(__snake_case , 0 ) for i in range(len(__snake_case ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph __UpperCAmelCase = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
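# Editor's note: a quick counterexample. An odd cycle cannot be two-colored, so a
# triangle makes the check return False (illustrative input):
# print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False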
29
import os # Precomputes a list of the 100 first triangular numbers __UpperCAmelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)] def lowercase__ ( ): '''simple docstring''' UpperCAmelCase_ : Any = os.path.dirname(os.path.realpath(__snake_case ) ) UpperCAmelCase_ : Optional[Any] = os.path.join(__snake_case , 'words.txt' ) UpperCAmelCase_ : Union[str, Any] = '' with open(__snake_case ) as f: UpperCAmelCase_ : List[Any] = f.readline() UpperCAmelCase_ : Optional[int] = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )] UpperCAmelCase_ : Optional[int] = [ word for word in [sum(ord(__snake_case ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(__snake_case ) if __name__ == "__main__": print(solution())
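# Editor's note: a worked example of the word-value rule used above: "SKY" scores
# ord("S") - 64 + ord("K") - 64 + ord("Y") - 64 = 19 + 11 + 25 = 55, which is the
# 10th triangular number (10 * 11 / 2), so "SKY" counts as a triangle word.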
29
1
import requests from bsa import BeautifulSoup def lowercase__ ( __snake_case : str = "https://www.worldometers.info/coronavirus" ): '''simple docstring''' UpperCAmelCase_ : int = BeautifulSoup(requests.get(__snake_case ).text , 'html.parser' ) UpperCAmelCase_ : Tuple = soup.findAll('h1' ) UpperCAmelCase_ : Tuple = soup.findAll('div' , {'class': 'maincounter-number'} ) keys += soup.findAll('span' , {'class': 'panel-title'} ) values += soup.findAll('div' , {'class': 'number-table-main'} ) return {key.text.strip(): value.text.strip() for key, value in zip(__snake_case , __snake_case )} if __name__ == "__main__": print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n') for key, value in world_covidaa_stats().items(): print(F'{key}\n{value}\n')
29
import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem __UpperCAmelCase = importlib.util.find_spec('s3fs') is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 __UpperCAmelCase = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def lowercase__ ( __snake_case : str ): '''simple docstring''' if "://" in dataset_path: UpperCAmelCase_ : int = dataset_path.split('://' )[1] return dataset_path def lowercase__ ( __snake_case : fsspec.AbstractFileSystem ): '''simple docstring''' if fs is not None and fs.protocol != "file": return True else: return False def lowercase__ ( __snake_case : fsspec.AbstractFileSystem , __snake_case : str , __snake_case : str ): '''simple docstring''' UpperCAmelCase_ : List[str] = not is_remote_filesystem(__snake_case ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(__snake_case ) , fs._strip_protocol(__snake_case ) ) else: fs.mv(__snake_case , __snake_case , recursive=__snake_case ) def lowercase__ ( ): '''simple docstring''' if hasattr(fsspec.asyn , 'reset_lock' ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : int = threading.Lock()
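# Editor's note: a small sketch of the path helper above, assuming its upstream
# name `extract_path_from_uri`; behavior follows directly from the code:
# extract_path_from_uri("s3://my-bucket/datasets/train")  # -> "my-bucket/datasets/train"
# extract_path_from_uri("/local/path/train")              # -> "/local/path/train" (no scheme, unchanged)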
29
1
from __future__ import annotations def lowercase__ ( __snake_case : float , __snake_case : float , __snake_case : float , ): '''simple docstring''' if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError('You cannot supply more or less than 2 values' ) elif electron_conc < 0: raise ValueError('Electron concentration cannot be negative in a semiconductor' ) elif hole_conc < 0: raise ValueError('Hole concentration cannot be negative in a semiconductor' ) elif intrinsic_conc < 0: raise ValueError( 'Intrinsic concentration cannot be negative in a semiconductor' ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
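# Editor's note: a worked example. The helper encodes the mass-action law
# n * p = n_i**2; passing intrinsic_conc=0 solves for it as sqrt(n * p). Assuming
# the upstream name `carrier_concentration` for the function above:
# carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
# -> ('intrinsic_conc', 50.0)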
29
def lowercase__ ( __snake_case : list ): '''simple docstring''' for i in range(len(__snake_case ) - 1 , 0 , -1 ): UpperCAmelCase_ : Dict = False for j in range(__snake_case , 0 , -1 ): if unsorted[j] < unsorted[j - 1]: UpperCAmelCase_ , UpperCAmelCase_ : Any = unsorted[j - 1], unsorted[j] UpperCAmelCase_ : int = True for j in range(__snake_case ): if unsorted[j] > unsorted[j + 1]: UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = unsorted[j + 1], unsorted[j] UpperCAmelCase_ : Any = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip() __UpperCAmelCase = [int(item) for item in user_input.split(',')] print(F'{cocktail_shaker_sort(unsorted) = }')
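# Editor's note: usage sketch (list values are illustrative). The function sorts
# ascending in place and also returns the list:
# cocktail_shaker_sort([4, 5, 2, 1, 2])  # -> [1, 2, 2, 4, 5]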
29
1
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowercase__ ( __snake_case : Any , __snake_case : Any=10 ): '''simple docstring''' UpperCAmelCase_ : List[Any] = [] for _ in range(__snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowercase__ ( __snake_case : List[str] , __snake_case : int=10 ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = [] for step in range(__snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ : List[str] = os.path.join(__snake_case , 'schedule.bin' ) torch.save(scheduler.state_dict() , __snake_case ) UpperCAmelCase_ : str = torch.load(__snake_case ) scheduler.load_state_dict(__snake_case ) return lrs @require_torch class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[str]: self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) ) for a, b in zip(_UpperCamelCase , _UpperCamelCase ): self.assertAlmostEqual(_UpperCamelCase , _UpperCamelCase , delta=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : List[str] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_UpperCamelCase ) UpperCAmelCase_ : List[str] = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase_ : Dict = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase_ : int = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): UpperCAmelCase_ : Optional[int] = criterion(_UpperCamelCase , _UpperCamelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_UpperCamelCase ) UpperCAmelCase_ : Dict = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase_ : int = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase_ : str = Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_UpperCamelCase , weight_decay=0.0 , relative_step=_UpperCamelCase , scale_parameter=_UpperCamelCase , warmup_init=_UpperCamelCase , ) for _ in range(1_0_0_0 ): UpperCAmelCase_ : List[str] = criterion(_UpperCamelCase , _UpperCamelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class lowerCamelCase (unittest.TestCase ): '''simple docstring''' _snake_case : Union[str, Any] = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None _snake_case : Union[str, Any] = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None _snake_case : Dict = 1_0 def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> str: self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) ) for a, b in zip(_UpperCamelCase , _UpperCamelCase ): self.assertAlmostEqual(_UpperCamelCase , _UpperCamelCase , delta=_UpperCamelCase , msg=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : Union[str, Any] = {'num_warmup_steps': 2, 'num_training_steps': 1_0} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) UpperCAmelCase_ : List[str] = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'num_warmup_steps': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, 'num_cycles': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, 'power': 2.0, 'lr_end': 1E-7}, [0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56], ), get_inverse_sqrt_schedule: ( {'num_warmup_steps': 2}, [0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14], ), } for scheduler_func, data in scheds.items(): UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = data UpperCAmelCase_ : Any = scheduler_func(self.optimizer , **_UpperCamelCase ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) UpperCAmelCase_ : Optional[int] = unwrap_schedule(_UpperCamelCase , self.num_steps ) self.assertListAlmostEqual( _UpperCamelCase , _UpperCamelCase , tol=1E-2 , msg=f"failed for {scheduler_func} in normal scheduler" , ) UpperCAmelCase_ : Dict = scheduler_func(self.optimizer , **_UpperCamelCase ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(_UpperCamelCase ) # wrap to test picklability of the schedule UpperCAmelCase_ : Union[str, Any] = unwrap_and_save_reload_schedule(_UpperCamelCase , self.num_steps ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase , msg=f"failed for {scheduler_func} in save and reload" ) class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase ) -> List[str]: UpperCAmelCase_ : Dict = fn def __call__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]: return self.fn(*_UpperCamelCase , **_UpperCamelCase ) @classmethod def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[str]: UpperCAmelCase_ : Optional[int] = list(map(self , scheduler.lr_lambdas ) )
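# Editor's note: a minimal sketch of the schedule API these tests exercise (real
# transformers functions; the one-parameter optimizer is a toy placeholder):
# import torch
# from transformers import get_linear_schedule_with_warmup
# opt = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=10.0)
# sched = get_linear_schedule_with_warmup(opt, num_warmup_steps=2, num_training_steps=10)
# for _ in range(10):
#     opt.step()
#     sched.step()  # lr ramps 0 -> 10 over two steps, then decays linearly to 0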
29
from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n' def lowercase__ ( __snake_case : List[str] , __snake_case : int , __snake_case : Tuple=8 ): '''simple docstring''' UpperCAmelCase_ : Dict = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCAmelCase_ : List[Any] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def lowercase__ ( __snake_case : Any , __snake_case : int=512 , __snake_case : Dict=512 ): '''simple docstring''' UpperCAmelCase_ : Tuple = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) UpperCAmelCase_ : Dict = np.array(pil_image.convert('RGB' ) ) UpperCAmelCase_ : Any = arr.astype(np.floataa ) / 127.5 - 1 UpperCAmelCase_ : Dict = np.transpose(__snake_case , [2, 0, 1] ) UpperCAmelCase_ : List[str] = torch.from_numpy(__snake_case ).unsqueeze(0 ) return image class lowerCamelCase (_snake_case ): '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Union[str, Any]: super().__init__() self.register_modules( unet=_UpperCamelCase , scheduler=_UpperCamelCase , movq=_UpperCamelCase , ) UpperCAmelCase_ : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict: # get the original timestep using init_timestep UpperCAmelCase_ : Any = min(int(num_inference_steps * strength ) , _UpperCamelCase ) UpperCAmelCase_ : List[Any] = max(num_inference_steps - init_timestep , 0 ) UpperCAmelCase_ : str = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> Tuple: if not isinstance(_UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_UpperCamelCase )}" ) UpperCAmelCase_ : List[str] = image.to(device=_UpperCamelCase , dtype=_UpperCamelCase ) UpperCAmelCase_ : List[str] = batch_size * num_images_per_prompt if image.shape[1] == 4: UpperCAmelCase_ : List[str] = image else: if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(_UpperCamelCase )}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) elif isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : Any = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCamelCase ) ] UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase , dim=0 ) else: UpperCAmelCase_ : Union[str, Any] = self.movq.encode(_UpperCamelCase ).latent_dist.sample(_UpperCamelCase ) UpperCAmelCase_ : int = self.movq.config.scaling_factor * init_latents UpperCAmelCase_ : Optional[int] = torch.cat([init_latents] , dim=0 ) UpperCAmelCase_ : Tuple = init_latents.shape UpperCAmelCase_ : List[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase ) # get latents UpperCAmelCase_ : str = self.scheduler.add_noise(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = init_latents return latents def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Any: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) UpperCAmelCase_ : Optional[Any] = torch.device(f"cuda:{gpu_id}" ) UpperCAmelCase_ : Optional[Any] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]: if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' ) UpperCAmelCase_ : str = torch.device(f"cuda:{gpu_id}" ) if self.device.type != "cpu": self.to('cpu' , silence_dtype_warnings=_UpperCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase_ : Dict = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCAmelCase_ , UpperCAmelCase_ : Dict = cpu_offload_with_hook(_UpperCamelCase , _UpperCamelCase , prev_module_hook=_UpperCamelCase ) # We'll offload the last model manually.
UpperCAmelCase_ : Any = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __UpperCAmelCase ( self ) -> Dict: if not hasattr(self.unet , '_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(_UpperCamelCase , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_UpperCamelCase ) def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 4.0 , _UpperCamelCase = 0.3 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> str: UpperCAmelCase_ : Any = self._execution_device UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0 if isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : str = torch.cat(_UpperCamelCase , dim=0 ) UpperCAmelCase_ : Optional[Any] = image_embeds.shape[0] if isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase_ : int = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 ) UpperCAmelCase_ : int = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 ) UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase ) if not isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : Tuple = [image] if not all(isinstance(_UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( f"Input is in incorrect format: {[type(_UpperCamelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" ) UpperCAmelCase_ : str = torch.cat([prepare_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for i in image] , dim=0 ) UpperCAmelCase_ : Any = image.to(dtype=image_embeds.dtype , device=_UpperCamelCase ) UpperCAmelCase_ : List[str] = self.movq.encode(_UpperCamelCase )['latents'] UpperCAmelCase_ : List[Any] = latents.repeat_interleave(_UpperCamelCase , dim=0 ) self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt ) UpperCAmelCase_ , UpperCAmelCase_ : str = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor ) UpperCAmelCase_ : Dict = self.prepare_latents( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase ) for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase_ : str = {'image_embeds': image_embeds} UpperCAmelCase_ : Union[str, Any] = self.unet( sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0] if do_classifier_free_guidance: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 ) UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 ) UpperCAmelCase_ , UpperCAmelCase_ : str = variance_pred.chunk(2 ) UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , 'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): UpperCAmelCase_ , UpperCAmelCase_ : int = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ : List[str] = self.scheduler.step( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0] # post-processing UpperCAmelCase_ : Optional[Any] = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" ) if output_type in ["np", "pil"]: UpperCAmelCase_ : List[str] = image * 0.5 + 0.5 UpperCAmelCase_ : List[Any] = image.clamp(0 , 1 ) UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase_ : List[Any] = self.numpy_to_pil(_UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_UpperCamelCase )
29
1
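# Editor's note (hedged sketch, not part of the dataset rows above or below): the
# scheduler test above expects [0.0, 5.0, 10.0, 8.75, ...] from
# get_linear_schedule_with_warmup; a minimal plain-Python model of that schedule,
# assuming base_lr=10.0, num_warmup_steps=2 and num_training_steps=10 as in
# `common_kwargs`.
def linear_warmup_lr(step, base_lr=10.0, warmup=2, total=10):
    if step < warmup:
        return base_lr * step / warmup  # linear ramp-up
    return base_lr * (total - step) / (total - warmup)  # linear decay to zero

print([round(linear_warmup_lr(s), 2) for s in range(10)])
# -> [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]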
def lowercase__ ( __snake_case : int ): '''simple docstring''' UpperCAmelCase_ : list[list[int]] = [[0 for _ in range(__snake_case )] for _ in range(m + 1 )] for i in range(m + 1 ): UpperCAmelCase_ : Optional[Any] = 1 for n in range(m + 1 ): for k in range(1 , __snake_case ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: __UpperCAmelCase = int(input('Enter a number: ').strip()) print(partition(n)) except ValueError: print('Please enter a number.') else: try: __UpperCAmelCase = int(sys.argv[1]) print(partition(n)) except ValueError: print('Please pass a number.')
29
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def lowercase__ ( __snake_case : List[Any] , __snake_case : List[str]=False ): '''simple docstring''' try: UpperCAmelCase_ : int = os.environ[key] except KeyError: # KEY isn't set, default to `default`. UpperCAmelCase_ : Optional[int] = default else: # KEY is set, convert it to True or False. try: UpperCAmelCase_ : List[Any] = strtobool(__snake_case ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"If set, {key} must be yes or no." ) return _value __UpperCAmelCase = parse_flag_from_env('RUN_SLOW', default=False) def lowercase__ ( __snake_case : int ): '''simple docstring''' return unittest.skip('Test was skipped' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__snake_case ) def lowercase__ ( __snake_case : List[str] ): '''simple docstring''' return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__snake_case ) def lowercase__ ( __snake_case : List[str] ): '''simple docstring''' return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(__snake_case ) def lowercase__ ( __snake_case : str ): '''simple docstring''' return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__snake_case ) def lowercase__ ( __snake_case : str ): '''simple docstring''' return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__snake_case ) def lowercase__ ( __snake_case : Dict ): '''simple docstring''' return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__snake_case ) def lowercase__ ( __snake_case : Dict ): '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(__snake_case ) def lowercase__ ( __snake_case : Optional[int] ): '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__snake_case ) def lowercase__ ( __snake_case : int ): '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__snake_case ) def lowercase__ ( __snake_case : Dict ): '''simple docstring''' return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(__snake_case ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__snake_case ) def lowercase__ ( __snake_case : Dict=None , __snake_case : Dict=None ): '''simple docstring''' if test_case is None: return partial(__snake_case , version=__snake_case ) return unittest.skipUnless(is_torch_version('>=' , __snake_case ) , F"test requires torch version >= {version}" )(__snake_case ) def lowercase__ ( __snake_case : str ): '''simple docstring''' return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__snake_case ) def lowercase__ ( __snake_case : List[str] ): '''simple docstring''' return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__snake_case ) def lowercase__ ( __snake_case : str ): '''simple docstring''' return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__snake_case ) __UpperCAmelCase = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__snake_case ) class lowerCamelCase (unittest.TestCase ): '''simple docstring''' _snake_case : Union[str, Any] = True @classmethod def __UpperCAmelCase ( cls ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = tempfile.mkdtemp() @classmethod def __UpperCAmelCase ( cls ) -> List[str]: if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def __UpperCAmelCase ( self ) -> str: if self.clear_on_setup: for path in Path(self.tmpdir ).glob('**/*' ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(_UpperCamelCase ) class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ) -> Optional[int]: super().tearDown() # Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state() PartialState._reset_state() class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any: UpperCAmelCase_ : List[Any] = mocks if isinstance(_UpperCamelCase , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def lowercase__ ( __snake_case : int ): '''simple docstring''' UpperCAmelCase_ : int = AcceleratorState() UpperCAmelCase_ : str = tensor[None].clone().to(state.device ) UpperCAmelCase_ : List[str] = gather(__snake_case ).cpu() UpperCAmelCase_ : List[Any] = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , __snake_case ): return False return True class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any: UpperCAmelCase_ : str = returncode UpperCAmelCase_ : Optional[Any] = stdout UpperCAmelCase_ : Optional[Any] = stderr async def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Optional[int] ): '''simple docstring''' while True: UpperCAmelCase_ : Dict = await stream.readline() if line: callback(__snake_case ) else: break async def lowercase__ ( __snake_case : Optional[int] , __snake_case : Dict=None , __snake_case : str=None , __snake_case : Dict=None , __snake_case : List[str]=False , __snake_case : Optional[int]=False ): '''simple docstring''' if echo: print('\nRunning: ' , ' '.join(__snake_case ) ) UpperCAmelCase_ : Optional[Any] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__snake_case , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__snake_case , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) UpperCAmelCase_ : Any = [] UpperCAmelCase_ : str = [] def tee(__snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Optional[int]="" ): UpperCAmelCase_ : List[str] = line.decode('utf-8' ).rstrip() sink.append(__snake_case ) if not quiet: print(__snake_case , __snake_case , file=__snake_case ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda __snake_case : tee(__snake_case , __snake_case , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda __snake_case : tee(__snake_case , __snake_case , sys.stderr , label='stderr:' ) ) ), ] , timeout=__snake_case , ) return _RunOutput(await p.wait() , __snake_case , __snake_case ) def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[Any]=None , __snake_case : str=None , __snake_case : Tuple=180 , __snake_case : Dict=False , __snake_case : Optional[Any]=True ): '''simple docstring''' UpperCAmelCase_ : str = asyncio.get_event_loop() UpperCAmelCase_ : int = loop.run_until_complete( _stream_subprocess(__snake_case , env=__snake_case , stdin=__snake_case , timeout=__snake_case , quiet=__snake_case , echo=__snake_case ) ) UpperCAmelCase_ : int = ' '.join(__snake_case ) if result.returncode > 0: UpperCAmelCase_ : int = '\n'.join(result.stderr ) raise RuntimeError( F"'{cmd_str}' failed with returncode {result.returncode}\n\n" F"The combined stderr from workers follows:\n{stderr}" ) return result class lowerCamelCase (_snake_case ): '''simple docstring''' pass def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any]=False ): '''simple docstring''' try: UpperCAmelCase_ : List[Any] = subprocess.check_output(__snake_case , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(__snake_case , 'decode' ): UpperCAmelCase_ : str = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"Command `{' '.join(__snake_case )}` failed with the following error:\n\n{e.output.decode()}" ) from e
29
1
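# Editor's note (hedged sketch, names assumed): a direct recursive count of the
# partitions of n into parts of size at most k, handy as a cross-check for the
# memoized partition table built in the row above.
def count_partitions(n, k):
    if n == 0:
        return 1
    if n < 0 or k == 0:
        return 0
    # either use one part of size k, or restrict to parts smaller than k
    return count_partitions(n - k, k) + count_partitions(n, k - 1)

assert count_partitions(5, 5) == 7  # 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1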
__UpperCAmelCase = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []} __UpperCAmelCase = ['a', 'b', 'c', 'd', 'e'] def lowercase__ ( __snake_case : List[str] , __snake_case : str , __snake_case : Tuple ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = start # add current to visited visited.append(__snake_case ) UpperCAmelCase_ : Optional[int] = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: UpperCAmelCase_ : str = topological_sort(__snake_case , __snake_case , __snake_case ) # if all neighbors visited add current to sort sort.append(__snake_case ) # if all vertices haven't been visited select a new one to visit if len(__snake_case ) != len(__snake_case ): for vertice in vertices: if vertice not in visited: UpperCAmelCase_ : Optional[Any] = topological_sort(__snake_case , __snake_case , __snake_case ) # return sort return sort if __name__ == "__main__": __UpperCAmelCase = topological_sort('a', [], []) print(sort)
29
import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed __UpperCAmelCase = logging.getLogger(__name__) def lowercase__ ( __snake_case : List[Any]=2 , __snake_case : Union[str, Any]=3 , __snake_case : Any=16 , __snake_case : int = 10 , __snake_case : int = 2 ): '''simple docstring''' def get_dataset(__snake_case : Optional[Any] ): UpperCAmelCase_ : Optional[Any] = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(__snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) UpperCAmelCase_ : Any = get_dataset(__snake_case ) UpperCAmelCase_ : str = get_dataset(__snake_case ) UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 ) UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 ) return (train_dataloader, valid_dataloader) def lowercase__ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple=None ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = [] for epoch in range(__snake_case ): # Train quickly model.train() for batch in dataloader: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = batch UpperCAmelCase_ : List[Any] = model(__snake_case ) UpperCAmelCase_ : int = torch.nn.functional.mse_loss(__snake_case , __snake_case ) accelerator.backward(__snake_case ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class lowerCamelCase (nn.Module ): '''simple docstring''' def __init__( self ) -> Optional[Any]: super().__init__() UpperCAmelCase_ : List[Any] = nn.Parameter(torch.randn(1 ) ) UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn(1 ) ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[Any]: return x * self.a + self.b class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ) -> Dict: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Tuple = DummyModel() UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders() UpperCAmelCase_ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCamelCase , automatic_checkpoint_naming=_UpperCamelCase ) # Train baseline UpperCAmelCase_ : Dict = Accelerator(project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def __UpperCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Optional[Any] = DummyModel() UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders() # Train baseline UpperCAmelCase_ : Tuple = Accelerator() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' ) accelerator.save_state(_UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item() UpperCAmelCase_ : Dict = optimizer.state_dict() UpperCAmelCase_ : Union[str, Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item() UpperCAmelCase_ : Any = optimizer.state_dict() # Train partially set_seed(4_2 ) UpperCAmelCase_ : int = DummyModel() UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders() UpperCAmelCase_ : Optional[Any] = Accelerator() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) accelerator.load_state(_UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item() UpperCAmelCase_ : Optional[Any] = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save everything UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' ) accelerator.save_state(_UpperCamelCase ) # Load everything back in and make sure all states work accelerator.load_state(_UpperCamelCase ) test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item() UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Tuple = DummyModel() UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders() UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase ) # Train baseline UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial accelerator.save_state() ((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item() UpperCAmelCase_ : Optional[int] = optimizer.state_dict() UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item() UpperCAmelCase_ : Optional[int] = optimizer.state_dict() # Train partially set_seed(4_2 ) UpperCAmelCase_ : Any = DummyModel() UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders() UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase ) UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item() UpperCAmelCase_ : List[Any] = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) ) test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item() UpperCAmelCase_ : Dict = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> Dict: UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] ) UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] ) UpperCAmelCase_ : Union[str, Any] = DummyModel() UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() ) UpperCAmelCase_ : Any = Accelerator() with self.assertRaises(_UpperCamelCase ) as ve: accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Optional[int] = str(ve.exception ) self.assertTrue('Item at index 0' in message ) self.assertTrue('Item at index 1' in message ) self.assertFalse('Item at index 2' in message ) self.assertFalse('Item at index 3' in message ) def __UpperCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : int = DummyModel() UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders() UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase ) # Train baseline UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial accelerator.save_state() UpperCAmelCase_ : Dict = scheduler.state_dict() train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) self.assertEqual(_UpperCamelCase , scheduler.state_dict() ) def __UpperCAmelCase ( self ) -> Dict: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Optional[int] = DummyModel() UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 ) # Train baseline UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase ) # Save 3 states: for _ in range(1_1 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) ) @require_cuda def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : List[str] = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() ) if __name__ == "__main__": __UpperCAmelCase = '/tmp/accelerate/state_checkpointing' __UpperCAmelCase = DummyModel() __UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3) __UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9) __UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders() __UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline __UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no') if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the initial optimizer is loaded on the GPU for group in optimizer.param_groups: __UpperCAmelCase = group['params'][0].device break assert param_device.type == accelerator.device.type __UpperCAmelCase = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu') for group in optimizer.param_groups: __UpperCAmelCase = group['params'][0].device break assert ( param_device.type == torch.device('cpu').type ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device') for group in optimizer.param_groups: __UpperCAmelCase = group['params'][0].device break assert ( param_device.type == accelerator.device.type ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match='Unsupported optimizer map location passed'): accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid') accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
29
1
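# Editor's note (hedged sketch, not from the rows above): Kahn's algorithm yields
# a topological order of the same sample graph iteratively via in-degrees,
# avoiding the recursion used by the DFS variant above.
from collections import deque

def kahn_toposort(edges, vertices):
    indegree = {v: 0 for v in vertices}
    for src in vertices:
        for dst in edges[src]:
            indegree[dst] += 1
    queue = deque(v for v in vertices if indegree[v] == 0)
    order = []
    while queue:
        vertex = queue.popleft()
        order.append(vertex)
        for dst in edges[vertex]:
            indegree[dst] -= 1
            if indegree[dst] == 0:
                queue.append(dst)
    return order

print(kahn_toposort({'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}, ['a', 'b', 'c', 'd', 'e']))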
__UpperCAmelCase = { 0: '0', 1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'a', 11: 'b', 12: 'c', 13: 'd', 14: 'e', 15: 'f', } def lowercase__ ( __snake_case : float ): '''simple docstring''' assert type(__snake_case ) in (int, float) and decimal == int(__snake_case ) UpperCAmelCase_ : Tuple = int(__snake_case ) UpperCAmelCase_ : int = '' UpperCAmelCase_ : Dict = False if decimal < 0: UpperCAmelCase_ : Any = True decimal *= -1 while decimal > 0: UpperCAmelCase_ , UpperCAmelCase_ : Dict = divmod(__snake_case , 16 ) UpperCAmelCase_ : Optional[int] = values[remainder] + hexadecimal UpperCAmelCase_ : str = '0x' + hexadecimal if negative: UpperCAmelCase_ : Optional[int] = '-' + hexadecimal return hexadecimal if __name__ == "__main__": import doctest doctest.testmod()
29
import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor __UpperCAmelCase = logging.get_logger(__name__) class lowerCamelCase (_snake_case ): '''simple docstring''' def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> None: warnings.warn( 'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ImageGPTImageProcessor instead.' , _UpperCamelCase , ) super().__init__(*_UpperCamelCase , **_UpperCamelCase )
29
1
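# Editor's note (hedged sketch, helper name assumed): the same repeated
# divmod-by-16 idea as the converter above in compact form, checked against the
# built-in hex().
def to_hex(n):
    if n == 0:
        return '0x0'
    sign, n = ('-', -n) if n < 0 else ('', n)
    digits = ''
    while n > 0:
        n, remainder = divmod(n, 16)
        digits = '0123456789abcdef'[remainder] + digits
    return sign + '0x' + digits

assert to_hex(255) == hex(255) == '0xff'
assert to_hex(-4096) == hex(-4096) == '-0x1000'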
import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'nvidia/segformer-b0-finetuned-ade-512-512': ( 'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json' ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : str = '''segformer''' def __init__( self , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=[2, 2, 2, 2] , _UpperCamelCase=[8, 4, 2, 1] , _UpperCamelCase=[3_2, 6_4, 1_6_0, 2_5_6] , _UpperCamelCase=[7, 3, 3, 3] , _UpperCamelCase=[4, 2, 2, 2] , _UpperCamelCase=[1, 2, 5, 8] , _UpperCamelCase=[4, 4, 4, 4] , _UpperCamelCase="gelu" , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=0.1 , _UpperCamelCase=0.02 , _UpperCamelCase=0.1 , _UpperCamelCase=1E-6 , _UpperCamelCase=2_5_6 , _UpperCamelCase=2_5_5 , **_UpperCamelCase , ) -> List[Any]: super().__init__(**_UpperCamelCase ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( 'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be' ' removed, as the behaviour will default to that of reshape_last_stage = True.' , _UpperCamelCase , ) UpperCAmelCase_ : str = num_channels UpperCAmelCase_ : List[Any] = num_encoder_blocks UpperCAmelCase_ : str = depths UpperCAmelCase_ : List[Any] = sr_ratios UpperCAmelCase_ : List[str] = hidden_sizes UpperCAmelCase_ : List[str] = patch_sizes UpperCAmelCase_ : Dict = strides UpperCAmelCase_ : Dict = mlp_ratios UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : List[str] = hidden_dropout_prob UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase_ : Optional[int] = classifier_dropout_prob UpperCAmelCase_ : Tuple = initializer_range UpperCAmelCase_ : List[str] = drop_path_rate UpperCAmelCase_ : Union[str, Any] = layer_norm_eps UpperCAmelCase_ : int = decoder_hidden_size UpperCAmelCase_ : Optional[Any] = kwargs.get('reshape_last_stage' , _UpperCamelCase ) UpperCAmelCase_ : List[str] = semantic_loss_ignore_index class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Any = version.parse('''1.11''' ) @property def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __UpperCAmelCase ( self ) -> float: return 1E-4 @property def __UpperCAmelCase ( self ) -> int: return 1_2
29
def lowercase__ ( __snake_case : Dict ): '''simple docstring''' if not head: return True # split the list to two parts UpperCAmelCase_ , UpperCAmelCase_ : Any = head.next, head while fast and fast.next: UpperCAmelCase_ : str = fast.next.next UpperCAmelCase_ : Union[str, Any] = slow.next UpperCAmelCase_ : int = slow.next UpperCAmelCase_ : List[Any] = None # Don't forget here! But forget still works! # reverse the second part UpperCAmelCase_ : Tuple = None while second: UpperCAmelCase_ : int = second.next UpperCAmelCase_ : Any = node UpperCAmelCase_ : Optional[Any] = second UpperCAmelCase_ : Tuple = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False UpperCAmelCase_ : Optional[Any] = node.next UpperCAmelCase_ : Dict = head.next return True def lowercase__ ( __snake_case : Union[str, Any] ): '''simple docstring''' if not head or not head.next: return True # 1. Get the midpoint (slow) UpperCAmelCase_ : Any = head while fast and fast.next: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = fast.next.next, slow.next # 2. Push the second half into the stack UpperCAmelCase_ : List[str] = [slow.val] while slow.next: UpperCAmelCase_ : List[str] = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False UpperCAmelCase_ : int = cur.next return True def lowercase__ ( __snake_case : Dict ): '''simple docstring''' if not head or not head.next: return True UpperCAmelCase_ : Tuple = {} UpperCAmelCase_ : int = 0 while head: if head.val in d: d[head.val].append(__snake_case ) else: UpperCAmelCase_ : List[Any] = [pos] UpperCAmelCase_ : Any = head.next pos += 1 UpperCAmelCase_ : Dict = pos - 1 UpperCAmelCase_ : Optional[int] = 0 for v in d.values(): if len(__snake_case ) % 2 != 0: middle += 1 else: UpperCAmelCase_ : int = 0 for i in range(0 , len(__snake_case ) ): if v[i] + v[len(__snake_case ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
29
1
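# Editor's note (hedged sketch; ListNode and build are assumed helpers, not
# defined in the row above): a tiny harness for exercising the linked-list
# palindrome checks.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None

def build(values):
    head = current = ListNode(values[0])
    for value in values[1:]:
        current.next = ListNode(value)
        current = current.next
    return head

def is_palindrome_stack(head):
    values = []
    while head:
        values.append(head.val)
        head = head.next
    return values == values[::-1]

assert is_palindrome_stack(build([1, 2, 2, 1]))
assert not is_palindrome_stack(build([1, 2, 3]))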
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __UpperCAmelCase = logging.get_logger(__name__) class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Dict = ['''pixel_values'''] def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = 1 / 2_5_5 , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = True , **_UpperCamelCase , ) -> None: super().__init__(**_UpperCamelCase ) UpperCAmelCase_ : str = size if size is not None else {'height': 3_8_4, 'width': 3_8_4} UpperCAmelCase_ : Optional[Any] = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase ) UpperCAmelCase_ : Dict = do_resize UpperCAmelCase_ : Optional[int] = size UpperCAmelCase_ : int = resample UpperCAmelCase_ : Any = do_rescale UpperCAmelCase_ : Dict = rescale_factor UpperCAmelCase_ : Union[str, Any] = do_normalize UpperCAmelCase_ : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN UpperCAmelCase_ : int = image_std if image_std is not None else OPENAI_CLIP_STD UpperCAmelCase_ : Dict = do_convert_rgb def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray: UpperCAmelCase_ : List[str] = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}" ) UpperCAmelCase_ : Optional[int] = (size['height'], size['width']) return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> Dict: return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray: return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ) -> PIL.Image.Image: UpperCAmelCase_ : Tuple = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ : List[Any] = resample if resample is not None else self.resample UpperCAmelCase_ : str = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ : List[str] = image_std if image_std is not None else self.image_std UpperCAmelCase_ : Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb UpperCAmelCase_ : List[Any] = size if size is not None else self.size UpperCAmelCase_ : int = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase ) UpperCAmelCase_ : List[str] = make_list_of_images(_UpperCamelCase ) if not valid_images(_UpperCamelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: UpperCAmelCase_ : Dict = [convert_to_rgb(_UpperCamelCase ) for image in images] # All transformations expect numpy arrays. UpperCAmelCase_ : List[str] = [to_numpy_array(_UpperCamelCase ) for image in images] if do_resize: UpperCAmelCase_ : str = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images] if do_rescale: UpperCAmelCase_ : Optional[int] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images] if do_normalize: UpperCAmelCase_ : Union[str, Any] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images] UpperCAmelCase_ : int = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images] UpperCAmelCase_ : Any = BatchFeature(data={'pixel_values': images} , tensor_type=_UpperCamelCase ) return encoded_outputs
29
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMSNModel', 'ViTMSNForImageClassification', 'ViTMSNPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
29
1
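# Editor's note (hedged, simplified sketch of the _LazyModule pattern above;
# LazyShim is a made-up name): attribute access triggers the real import, so
# importing the package itself stays cheap.
import importlib
import types

class LazyShim(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        return getattr(importlib.import_module(self._attr_to_module[attr]), attr)

print(LazyShim('shim', {'json': ['dumps']}).dumps({'a': 1}))  # -> {"a": 1}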
from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder __UpperCAmelCase = datasets.utils.logging.get_logger(__name__) class lowerCamelCase (folder_based_builder.FolderBasedBuilderConfig ): '''simple docstring''' _snake_case : bool = None _snake_case : bool = None class lowerCamelCase (folder_based_builder.FolderBasedBuilder ): '''simple docstring''' _snake_case : int = datasets.Audio() _snake_case : Optional[int] = '''audio''' _snake_case : int = AudioFolderConfig _snake_case : List[str] # definition at the bottom of the script _snake_case : Dict = AudioClassification(audio_column='''audio''' , label_column='''label''' ) __UpperCAmelCase = [ '.aiff', '.au', '.avr', '.caf', '.flac', '.htk', '.svx', '.mat4', '.mat5', '.mpc2k', '.ogg', '.paf', '.pvf', '.raw', '.rf64', '.sd2', '.sds', '.ircam', '.voc', '.w64', '.wav', '.nist', '.wavex', '.wve', '.xi', '.mp3', '.opus', ] __UpperCAmelCase = AUDIO_EXTENSIONS
29
__UpperCAmelCase = { 'Pillow': 'Pillow<10.0.0', 'accelerate': 'accelerate>=0.20.3', 'av': 'av==9.2.0', 'beautifulsoup4': 'beautifulsoup4', 'black': 'black~=23.1', 'codecarbon': 'codecarbon==1.2.0', 'cookiecutter': 'cookiecutter==1.7.3', 'dataclasses': 'dataclasses', 'datasets': 'datasets!=2.5.0', 'decord': 'decord==0.6.0', 'deepspeed': 'deepspeed>=0.9.3', 'diffusers': 'diffusers', 'dill': 'dill<0.3.5', 'evaluate': 'evaluate>=0.2.0', 'fairscale': 'fairscale>0.3', 'faiss-cpu': 'faiss-cpu', 'fastapi': 'fastapi', 'filelock': 'filelock', 'flax': 'flax>=0.4.1,<=0.7.0', 'ftfy': 'ftfy', 'fugashi': 'fugashi>=1.0', 'GitPython': 'GitPython<3.1.19', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0', 'importlib_metadata': 'importlib_metadata', 'ipadic': 'ipadic>=1.0.0,<2.0', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13', 'jaxlib': 'jaxlib>=0.1.65,<=0.4.13', 'jieba': 'jieba', 'kenlm': 'kenlm', 'keras-nlp': 'keras-nlp>=0.3.1', 'librosa': 'librosa', 'nltk': 'nltk', 'natten': 'natten>=0.14.6', 'numpy': 'numpy>=1.17', 'onnxconverter-common': 'onnxconverter-common', 'onnxruntime-tools': 'onnxruntime-tools>=1.4.2', 'onnxruntime': 'onnxruntime>=1.4.0', 'opencv-python': 'opencv-python', 'optuna': 'optuna', 'optax': 'optax>=0.0.8,<=0.1.4', 'packaging': 'packaging>=20.0', 'parameterized': 'parameterized', 'phonemizer': 'phonemizer', 'protobuf': 'protobuf', 'psutil': 'psutil', 'pyyaml': 'pyyaml>=5.1', 'pydantic': 'pydantic<2', 'pytest': 'pytest>=7.2.0', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'python': 'python>=3.8.0', 'ray[tune]': 'ray[tune]', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'rhoknp': 'rhoknp>=1.1.0,<1.3.1', 'rjieba': 'rjieba', 'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1', 'ruff': 'ruff>=0.0.241,<=0.0.259', 'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0', 'sacremoses': 'sacremoses', 'safetensors': 'safetensors>=0.3.1', 'sagemaker': 'sagemaker>=2.31.0', 'scikit-learn': 'scikit-learn', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'sigopt': 'sigopt', 'starlette': 'starlette', 'sudachipy': 'sudachipy>=0.6.6', 'sudachidict_core': 'sudachidict_core>=20220729', 'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14', 'tensorflow': 'tensorflow>=2.6,<2.14', 'tensorflow-text': 'tensorflow-text<2.14', 'tf2onnx': 'tf2onnx', 'timeout-decorator': 'timeout-decorator', 'timm': 'timm', 'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14', 'torch': 'torch>=1.9,!=1.12.0', 'torchaudio': 'torchaudio', 'torchvision': 'torchvision', 'pyctcdecode': 'pyctcdecode>=0.4.0', 'tqdm': 'tqdm>=4.27', 'unidic': 'unidic>=1.0.2', 'unidic_lite': 'unidic_lite>=1.0.7', 'urllib3': 'urllib3<2.0.0', 'uvicorn': 'uvicorn', }
29
1
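# Editor's note (hedged sketch, assumes the third-party `packaging` library):
# pin strings like those in the dependency table above parse back into
# structured (name, specifier) pairs.
from packaging.requirements import Requirement

for pin in ('torch>=1.9,!=1.12.0', 'tokenizers>=0.11.1,!=0.11.3,<0.14'):
    requirement = Requirement(pin)
    print(requirement.name, requirement.specifier)  # e.g. torch with >=1.9 and !=1.12.0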
import string import numpy def lowercase__ ( __snake_case : int , __snake_case : int ): '''simple docstring''' return b if a == 0 else greatest_common_divisor(b % a , __snake_case ) class lowerCamelCase : '''simple docstring''' _snake_case : List[Any] = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) _snake_case : str = numpy.vectorize(lambda _snake_case : x % 3_6 ) _snake_case : Any = numpy.vectorize(_snake_case ) def __init__( self , _UpperCamelCase ) -> None: UpperCAmelCase_ : Dict = self.modulus(_UpperCamelCase ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key UpperCAmelCase_ : Dict = encrypt_key.shape[0] def __UpperCAmelCase ( self , _UpperCamelCase ) -> int: return self.key_string.index(_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> str: return self.key_string[round(_UpperCamelCase )] def __UpperCAmelCase ( self ) -> None: UpperCAmelCase_ : Tuple = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: UpperCAmelCase_ : List[Any] = det % len(self.key_string ) UpperCAmelCase_ : Any = len(self.key_string ) if greatest_common_divisor(_UpperCamelCase , len(self.key_string ) ) != 1: UpperCAmelCase_ : Optional[Any] = ( f"determinant modular {req_l} of encryption key({det}) " f"is not co prime w.r.t {req_l}.\nTry another key." ) raise ValueError(_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> str: UpperCAmelCase_ : List[str] = [char for char in text.upper() if char in self.key_string] UpperCAmelCase_ : List[Any] = chars[-1] while len(_UpperCamelCase ) % self.break_key != 0: chars.append(_UpperCamelCase ) return "".join(_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> str: UpperCAmelCase_ : int = self.process_text(text.upper() ) UpperCAmelCase_ : Tuple = '' for i in range(0 , len(_UpperCamelCase ) - self.break_key + 1 , self.break_key ): UpperCAmelCase_ : Dict = text[i : i + self.break_key] UpperCAmelCase_ : Any = [self.replace_letters(_UpperCamelCase ) for char in batch] UpperCAmelCase_ : Union[str, Any] = numpy.array([vec] ).T UpperCAmelCase_ : Any = self.modulus(self.encrypt_key.dot(_UpperCamelCase ) ).T.tolist()[ 0 ] UpperCAmelCase_ : Dict = ''.join( self.replace_digits(_UpperCamelCase ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def __UpperCAmelCase ( self ) -> numpy.ndarray: UpperCAmelCase_ : int = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: UpperCAmelCase_ : Tuple = det % len(self.key_string ) UpperCAmelCase_ : List[Any] = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: UpperCAmelCase_ : List[Any] = i break UpperCAmelCase_ : Optional[Any] = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(_UpperCamelCase ) ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> str: UpperCAmelCase_ : Union[str, Any] = self.make_decrypt_key() UpperCAmelCase_ : List[Any] = self.process_text(text.upper() ) UpperCAmelCase_ : Optional[Any] = '' for i in range(0 , len(_UpperCamelCase ) - self.break_key + 1 , self.break_key ): UpperCAmelCase_ : int = text[i : i + self.break_key] UpperCAmelCase_ : List[Any] = [self.replace_letters(_UpperCamelCase ) for char in batch] UpperCAmelCase_ : Union[str, Any] = numpy.array([vec] ).T UpperCAmelCase_ : Tuple = self.modulus(decrypt_key.dot(_UpperCamelCase ) ).T.tolist()[0] UpperCAmelCase_ : int = ''.join( self.replace_digits(_UpperCamelCase ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def lowercase__ ( ): '''simple docstring''' UpperCAmelCase_ : List[Any] = int(input('Enter the order of the encryption key: ' ) ) UpperCAmelCase_ : List[str] = [] print('Enter each row of the encryption key with space separated integers' ) for _ in range(__snake_case ): UpperCAmelCase_ : Dict = [int(__snake_case ) for x in input().split()] hill_matrix.append(__snake_case ) UpperCAmelCase_ : Any = HillCipher(numpy.array(__snake_case ) ) print('Would you like to encrypt or decrypt some text? (1 or 2)' ) UpperCAmelCase_ : Dict = input('\n1. Encrypt\n2. Decrypt\n' ) if option == "1": UpperCAmelCase_ : Dict = input('What text would you like to encrypt?: ' ) print('Your encrypted text is:' ) print(hc.encrypt(__snake_case ) ) elif option == "2": UpperCAmelCase_ : List[Any] = input('What text would you like to decrypt?: ' ) print('Your decrypted text is:' ) print(hc.decrypt(__snake_case ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
29
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : "DiagonalGaussianDistribution" class lowerCamelCase (_snake_case , _snake_case ): '''simple docstring''' _snake_case : Optional[int] = True @register_to_config def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]: super().__init__() # pass init params to Encoder UpperCAmelCase_ : List[str] = Encoder( in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , ) # pass init params to Decoder UpperCAmelCase_ : Dict = Decoder( in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , ) UpperCAmelCase_ : Any = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) UpperCAmelCase_ : List[Any] = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 ) UpperCAmelCase_ : Any = False UpperCAmelCase_ : int = False # only relevant if vae tiling is enabled UpperCAmelCase_ : Optional[int] = self.config.sample_size UpperCAmelCase_ : int = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) UpperCAmelCase_ : Optional[Any] = 0.25 def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]: if isinstance(_UpperCamelCase , (Encoder, Decoder) ): UpperCAmelCase_ : Union[str, Any] = value def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int: UpperCAmelCase_ : Tuple = use_tiling def __UpperCAmelCase ( self ) -> Dict: self.enable_tiling(_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ : str = True def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : Optional[int] = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]: UpperCAmelCase_ : Optional[int] = {} def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if hasattr(_UpperCamelCase , 'set_processor' ): UpperCAmelCase_ : Optional[int] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) return processors def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = len(self.attn_processors.keys() ) if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if hasattr(_UpperCamelCase , 'set_processor' ): if not isinstance(_UpperCamelCase , _UpperCamelCase ): module.set_processor(_UpperCamelCase ) else: module.set_processor(processor.pop(f"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> Union[str, Any]: self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput: if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase ) if self.use_slicing and x.shape[0] > 1: UpperCAmelCase_ : Union[str, Any] = [self.encoder(_UpperCamelCase ) for x_slice in x.split(1 )] UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase ) else: UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase ) UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase ) UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase ) UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase ) @apply_forward_hook def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: if self.use_slicing and z.shape[0] > 1: UpperCAmelCase_ : List[str] = [self._decode(_UpperCamelCase ).sample for z_slice in z.split(1 )] UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase ) else: UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any: UpperCAmelCase_ : Tuple = min(a.shape[2] , b.shape[2] , _UpperCamelCase ) for y in range(_UpperCamelCase ): UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict: UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , _UpperCamelCase ) for x in range(_UpperCamelCase ): UpperCAmelCase_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput: UpperCAmelCase_ : Any = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor ) UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. UpperCAmelCase_ : List[str] = [] for i in range(0 , x.shape[2] , _UpperCamelCase ): UpperCAmelCase_ : Any = [] for j in range(0 , x.shape[3] , _UpperCamelCase ): UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase ) UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase ) row.append(_UpperCamelCase ) rows.append(_UpperCamelCase ) UpperCAmelCase_ : str = [] for i, row in enumerate(_UpperCamelCase ): UpperCAmelCase_ : List[Any] = [] for j, tile in enumerate(_UpperCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase ) if j > 0: UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) ) UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 ) UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor ) UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ : Union[str, Any] = [] for i in range(0 , z.shape[2] , _UpperCamelCase ): UpperCAmelCase_ : List[str] = [] for j in range(0 , z.shape[3] , _UpperCamelCase ): UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase ) UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase ) row.append(_UpperCamelCase ) rows.append(_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = [] for i, row in enumerate(_UpperCamelCase ): UpperCAmelCase_ : List[Any] = [] for j, tile in enumerate(_UpperCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase ) if j > 0: UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) ) UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]: UpperCAmelCase_ : Optional[Any] = sample UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist if sample_posterior: UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase ) else: UpperCAmelCase_ : int = posterior.mode() UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase )
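The tiled encode/decode paths in the sample above stitch neighboring tiles together with a linear cross-fade (`blend_v`/`blend_h`). A minimal runnable sketch of that horizontal blend, using plain `torch`; the names `blend_h`, `a`, and `b` are illustrative, not taken from the sample's obfuscated identifiers:

import torch


def blend_h(a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
    # Cross-fade the right edge of tile `a` into the left edge of tile `b`
    # over `blend_extent` columns: the a * (1 - t) + b * t ramp used above.
    blend_extent = min(a.shape[3], b.shape[3], blend_extent)
    for x in range(blend_extent):
        t = x / blend_extent
        b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - t) + b[:, :, :, x] * t
    return b


a = torch.zeros(1, 1, 4, 8)  # left tile
b = torch.ones(1, 1, 4, 8)   # right tile, overlapping by 4 columns
print(blend_h(a, b, 4)[0, 0, 0])  # ramps 0.00, 0.25, 0.50, 0.75, then stays 1.0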
29
1
import operator


def lowercase__ ( __snake_case : list , __snake_case : bool = False , __snake_case : list | None = None ):
    '''simple docstring'''
    UpperCAmelCase_ : Dict = operator.lt if reverse else operator.gt
    UpperCAmelCase_ : Optional[Any] = solution or []
    if not arr:
        return solution
    UpperCAmelCase_ : Dict = [arr.pop(0 )]
    for i, item in enumerate(__snake_case ):
        if _operator(__snake_case , sublist[-1] ):
            sublist.append(__snake_case )
            arr.pop(__snake_case )
    # merging sublist into solution list
    if not solution:
        solution.extend(__snake_case )
    else:
        while sublist:
            UpperCAmelCase_ : Optional[Any] = sublist.pop(0 )
            for i, xx in enumerate(__snake_case ):
                if not _operator(__snake_case , __snake_case ):
                    solution.insert(__snake_case , __snake_case )
                    break
            else:
                solution.append(__snake_case )
    strand_sort(__snake_case , __snake_case , __snake_case )
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
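The identifiers in these samples are machine-rewritten (every argument becomes `_UpperCamelCase`/`__snake_case`), so the block above does not run as printed. A minimal runnable sketch of the same strand-sort algorithm, reconstructed under that assumption:

import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    # Pull an ordered "strand" out of arr, merge it into the running solution,
    # then recurse until arr is exhausted.
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution


assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]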
29
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class lowerCamelCase (_snake_case , unittest.TestCase ): '''simple docstring''' _snake_case : str = BertJapaneseTokenizer _snake_case : Dict = False _snake_case : int = True def __UpperCAmelCase ( self ) -> str: super().setUp() UpperCAmelCase_ : str = [ '[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは', '世界', '##世界', '、', '##、', '。', '##。', ] UpperCAmelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict: UpperCAmelCase_ : List[Any] = 'こんにちは、世界。 \nこんばんは、世界。' UpperCAmelCase_ : List[Any] = 'こんにちは 、 世界 。 こんばんは 、 世界 。' return input_text, output_text def __UpperCAmelCase ( self , _UpperCamelCase ) -> Union[str, Any]: UpperCAmelCase_ , UpperCAmelCase_ : int = self.get_input_output_texts(_UpperCamelCase ) UpperCAmelCase_ : int = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) UpperCAmelCase_ : Tuple = tokenizer.decode(_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase ) return text, ids def __UpperCAmelCase ( self ) -> Dict: pass # TODO add if relevant def __UpperCAmelCase ( self ) -> Optional[Any]: pass # TODO add if relevant def __UpperCAmelCase ( self ) -> Optional[Any]: pass # TODO add if relevant def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : Any = self.tokenizer_class(self.vocab_file ) UpperCAmelCase_ : Dict = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' ) self.assertListEqual(_UpperCamelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] ) def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' ) self.assertIsNotNone(_UpperCamelCase ) UpperCAmelCase_ : List[str] = 'こんにちは、世界。\nこんばんは、世界。' UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] ) UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname , 'tokenizer.bin' ) with open(_UpperCamelCase , 'wb' ) as handle: pickle.dump(_UpperCamelCase , _UpperCamelCase ) with open(_UpperCamelCase , 'rb' ) as handle: UpperCAmelCase_ : str = pickle.load(_UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = tokenizer_new.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = MecabTokenizer(mecab_dic='ipadic' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) def __UpperCAmelCase ( self ) -> Union[str, Any]: try: 
UpperCAmelCase_ : List[str] = MecabTokenizer(mecab_dic='unidic_lite' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) def __UpperCAmelCase ( self ) -> Optional[Any]: try: UpperCAmelCase_ : Optional[int] = MecabTokenizer(mecab_dic='unidic' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) def __UpperCAmelCase ( self ) -> Dict: UpperCAmelCase_ : Dict = MecabTokenizer(do_lower_case=_UpperCamelCase , mecab_dic='ipadic' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) def __UpperCAmelCase ( self ) -> str: try: UpperCAmelCase_ : int = MecabTokenizer( do_lower_case=_UpperCamelCase , normalize_text=_UpperCamelCase , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , ) def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : Any = MecabTokenizer(normalize_text=_UpperCamelCase , mecab_dic='ipadic' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , ) @require_sudachi def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' ) self.assertIsNotNone(_UpperCamelCase ) UpperCAmelCase_ : Any = 'こんにちは、世界。\nこんばんは、世界。' UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] ) UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , 'tokenizer.bin' ) with open(_UpperCamelCase , 'wb' ) as handle: pickle.dump(_UpperCamelCase , _UpperCamelCase ) with open(_UpperCamelCase , 'rb' ) as handle: UpperCAmelCase_ : Tuple = pickle.load(_UpperCamelCase ) UpperCAmelCase_ : Optional[int] = tokenizer_new.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) @require_sudachi def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : Tuple = SudachiTokenizer(sudachi_dict_type='core' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , ) @require_sudachi def __UpperCAmelCase ( self ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' ) self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] ) @require_sudachi def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : Optional[int] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' ) self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] ) @require_sudachi def __UpperCAmelCase ( self ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' ) self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] ) 
@require_sudachi def __UpperCAmelCase ( self ) -> Any: UpperCAmelCase_ : Tuple = SudachiTokenizer(do_lower_case=_UpperCamelCase , sudachi_dict_type='core' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , ) @require_sudachi def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : List[str] = SudachiTokenizer(normalize_text=_UpperCamelCase , sudachi_dict_type='core' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , ) @require_sudachi def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : List[Any] = SudachiTokenizer(trim_whitespace=_UpperCamelCase , sudachi_dict_type='core' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) @require_jumanpp def __UpperCAmelCase ( self ) -> Any: UpperCAmelCase_ : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' ) self.assertIsNotNone(_UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = 'こんにちは、世界。\nこんばんは、世界。' UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] ) UpperCAmelCase_ : int = os.path.join(self.tmpdirname , 'tokenizer.bin' ) with open(_UpperCamelCase , 'wb' ) as handle: pickle.dump(_UpperCamelCase , _UpperCamelCase ) with open(_UpperCamelCase , 'rb' ) as handle: UpperCAmelCase_ : str = pickle.load(_UpperCamelCase ) UpperCAmelCase_ : Dict = tokenizer_new.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) @require_jumanpp def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : Union[str, Any] = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , ) @require_jumanpp def __UpperCAmelCase ( self ) -> int: UpperCAmelCase_ : int = JumanppTokenizer(do_lower_case=_UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , ) @require_jumanpp def __UpperCAmelCase ( self ) -> Union[str, Any]: UpperCAmelCase_ : Any = JumanppTokenizer(normalize_text=_UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , ) @require_jumanpp def __UpperCAmelCase ( self ) -> List[str]: UpperCAmelCase_ : List[Any] = JumanppTokenizer(trim_whitespace=_UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , ) @require_jumanpp def __UpperCAmelCase ( self ) -> Union[str, Any]: UpperCAmelCase_ : Dict = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , ) def __UpperCAmelCase ( self ) -> List[str]: UpperCAmelCase_ : 
Optional[Any] = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは'] UpperCAmelCase_ : Any = {} for i, token in enumerate(_UpperCamelCase ): UpperCAmelCase_ : Union[str, Any] = i UpperCAmelCase_ : List[Any] = WordpieceTokenizer(vocab=_UpperCamelCase , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] ) self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] ) self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] ) def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : Union[str, Any] = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' ) UpperCAmelCase_ : str = tokenizer.subword_tokenizer UpperCAmelCase_ : Union[str, Any] = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' ) self.assertListEqual(_UpperCamelCase , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] ) UpperCAmelCase_ : Any = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' ) self.assertListEqual(_UpperCamelCase , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] ) def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' ) UpperCAmelCase_ : Optional[Any] = tokenizer.encode('ありがとう。' , add_special_tokens=_UpperCamelCase ) UpperCAmelCase_ : str = tokenizer.encode('どういたしまして。' , add_special_tokens=_UpperCamelCase ) UpperCAmelCase_ : int = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase ) UpperCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class lowerCamelCase (_snake_case , unittest.TestCase ): '''simple docstring''' _snake_case : Dict = BertJapaneseTokenizer _snake_case : Any = False def __UpperCAmelCase ( self ) -> Tuple: super().setUp() UpperCAmelCase_ : List[str] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。'] UpperCAmelCase_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def __UpperCAmelCase ( self , **_UpperCamelCase ) -> Optional[int]: return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Tuple: UpperCAmelCase_ : Union[str, Any] = 'こんにちは、世界。 \nこんばんは、世界。' UpperCAmelCase_ : Optional[int] = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。' return input_text, output_text def __UpperCAmelCase ( self ) -> Dict: pass # TODO add if relevant def __UpperCAmelCase ( self ) -> List[str]: pass # TODO add if relevant def __UpperCAmelCase ( self ) -> str: pass # TODO add if relevant def __UpperCAmelCase ( self ) -> List[str]: UpperCAmelCase_ : Optional[Any] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' ) UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' ) self.assertListEqual( _UpperCamelCase , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] ) def 
__UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : List[str] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。'] UpperCAmelCase_ : Dict = {} for i, token in enumerate(_UpperCamelCase ): UpperCAmelCase_ : Any = i UpperCAmelCase_ : int = CharacterTokenizer(vocab=_UpperCamelCase , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] ) self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] ) def __UpperCAmelCase ( self ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' ) UpperCAmelCase_ : Dict = tokenizer.encode('ありがとう。' , add_special_tokens=_UpperCamelCase ) UpperCAmelCase_ : Dict = tokenizer.encode('どういたしまして。' , add_special_tokens=_UpperCamelCase ) UpperCAmelCase_ : Any = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase ) UpperCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : List[Any] = 'cl-tohoku/bert-base-japanese' UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = 'cl-tohoku/bert-base-japanese' with self.assertLogs('transformers' , level='WARNING' ) as cm: BertTokenizer.from_pretrained(_UpperCamelCase ) self.assertTrue( cm.records[0].message.startswith( 'The tokenizer class you load from this checkpoint is not the same type as the class this function' ' is called from.' ) ) UpperCAmelCase_ : List[Any] = 'bert-base-cased' with self.assertLogs('transformers' , level='WARNING' ) as cm: BertJapaneseTokenizer.from_pretrained(_UpperCamelCase ) self.assertTrue( cm.records[0].message.startswith( 'The tokenizer class you load from this checkpoint is not the same type as the class this function' ' is called from.' ) )
29
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'vocab.txt'} __UpperCAmelCase = { 'vocab_file': { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt', } } __UpperCAmelCase = { 'YituTech/conv-bert-base': 512, 'YituTech/conv-bert-medium-small': 512, 'YituTech/conv-bert-small': 512, } __UpperCAmelCase = { 'YituTech/conv-bert-base': {'do_lower_case': True}, 'YituTech/conv-bert-medium-small': {'do_lower_case': True}, 'YituTech/conv-bert-small': {'do_lower_case': True}, } class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Optional[int] = VOCAB_FILES_NAMES _snake_case : int = PRETRAINED_VOCAB_FILES_MAP _snake_case : Dict = PRETRAINED_INIT_CONFIGURATION _snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : Any = ConvBertTokenizer def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict: super().__init__( _UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , ) UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _UpperCamelCase ) != do_lower_case or normalizer_state.get('strip_accents' , _UpperCamelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _UpperCamelCase ) != tokenize_chinese_chars ): UpperCAmelCase_ : Any = getattr(_UpperCamelCase , normalizer_state.pop('type' ) ) UpperCAmelCase_ : str = do_lower_case UpperCAmelCase_ : List[Any] = strip_accents UpperCAmelCase_ : str = tokenize_chinese_chars UpperCAmelCase_ : Tuple = normalizer_class(**_UpperCamelCase ) UpperCAmelCase_ : Any = do_lower_case def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[str]: UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]: UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id] UpperCAmelCase_ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]: UpperCAmelCase_ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase ) return tuple(_UpperCamelCase )
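The `__init__` above rebuilds the fast backend's normalizer whenever the Python-side flags disagree with the serialized state. A small sketch of that rebuild step against the `tokenizers` library; the literal state string and the `do_lower_case` value are illustrative assumptions:

import json

from tokenizers import normalizers

# A serialized BertNormalizer state, shaped like backend_tokenizer.normalizer.__getstate__().
state = json.loads(
    '{"type": "BertNormalizer", "clean_text": true, "handle_chinese_chars": true,'
    ' "strip_accents": null, "lowercase": true}'
)

do_lower_case = False  # our desired flag disagrees with the stored one
if state.get("lowercase") != do_lower_case:
    normalizer_class = getattr(normalizers, state.pop("type"))  # -> normalizers.BertNormalizer
    state["lowercase"] = do_lower_case
    rebuilt = normalizer_class(**state)  # fresh normalizer with the corrected flags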
29
1
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'spiece.model'} __UpperCAmelCase = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } __UpperCAmelCase = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) __UpperCAmelCase = 0 __UpperCAmelCase = 1 __UpperCAmelCase = 2 __UpperCAmelCase = 3 __UpperCAmelCase = 4 class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Union[str, Any] = VOCAB_FILES_NAMES _snake_case : int = PRETRAINED_VOCAB_FILES_MAP _snake_case : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : Union[str, Any] = '''left''' def __init__( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase="<s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<sep>" , _UpperCamelCase="<pad>" , _UpperCamelCase="<cls>" , _UpperCamelCase="<mask>" , _UpperCamelCase=["<eop>", "<eod>"] , _UpperCamelCase = None , **_UpperCamelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it UpperCAmelCase_ : int = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token UpperCAmelCase_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_UpperCamelCase , remove_space=_UpperCamelCase , keep_accents=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) UpperCAmelCase_ : List[str] = 3 UpperCAmelCase_ : int = do_lower_case UpperCAmelCase_ : int = remove_space UpperCAmelCase_ : Dict = keep_accents UpperCAmelCase_ : List[str] = vocab_file UpperCAmelCase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCamelCase ) @property def __UpperCAmelCase ( self ) -> List[Any]: return len(self.sp_model ) def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> List[Any]: UpperCAmelCase_ : Tuple = self.__dict__.copy() UpperCAmelCase_ : Union[str, Any] = None return state def __setstate__( self , _UpperCamelCase ) -> Tuple: UpperCAmelCase_ : Tuple = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): UpperCAmelCase_ : Union[str, Any] = {} UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict: if self.remove_space: UpperCAmelCase_ : Optional[int] = ' '.join(inputs.strip().split() ) else: UpperCAmelCase_ : Dict = inputs UpperCAmelCase_ : str = outputs.replace('``' , '"' ).replace('\'\'' , '"' ) if not self.keep_accents: UpperCAmelCase_ : List[str] = unicodedata.normalize('NFKD' , 
_UpperCamelCase ) UpperCAmelCase_ : List[Any] = ''.join([c for c in outputs if not unicodedata.combining(_UpperCamelCase )] ) if self.do_lower_case: UpperCAmelCase_ : List[Any] = outputs.lower() return outputs def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = self.preprocess_text(_UpperCamelCase ) UpperCAmelCase_ : List[Any] = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) UpperCAmelCase_ : str = [] for piece in pieces: if len(_UpperCamelCase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit(): UpperCAmelCase_ : Any = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCamelCase , '' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: UpperCAmelCase_ : str = cur_pieces[1:] else: UpperCAmelCase_ : int = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_UpperCamelCase ) else: new_pieces.append(_UpperCamelCase ) return new_pieces def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]: return self.sp_model.PieceToId(_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Tuple: return self.sp_model.IdToPiece(_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[str]: UpperCAmelCase_ : List[str] = ''.join(_UpperCamelCase ).replace(_UpperCamelCase , ' ' ).strip() return out_string def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = True , **_UpperCamelCase , ) -> str: UpperCAmelCase_ : Optional[int] = kwargs.pop('use_source_tokenizer' , _UpperCamelCase ) UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(_UpperCamelCase , skip_special_tokens=_UpperCamelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 UpperCAmelCase_ : List[Any] = [] UpperCAmelCase_ : Any = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) ) UpperCAmelCase_ : List[str] = [] sub_texts.append(_UpperCamelCase ) else: current_sub_text.append(_UpperCamelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens UpperCAmelCase_ : Dict = ''.join(_UpperCamelCase ) UpperCAmelCase_ : Dict = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: UpperCAmelCase_ : int = self.clean_up_tokenization(_UpperCamelCase ) return clean_text else: return text def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]: UpperCAmelCase_ : List[Any] = [self.sep_token_id] UpperCAmelCase_ : Tuple = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) if token_ids_a is not None: return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1, 1] return ([0] * len(_UpperCamelCase )) + [1, 1] def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]: UpperCAmelCase_ : Any = [self.sep_token_id] UpperCAmelCase_ : Any = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]: if not os.path.isdir(_UpperCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return UpperCAmelCase_ : List[str] = os.path.join( _UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , 'wb' ) as fi: UpperCAmelCase_ : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,)
29
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__UpperCAmelCase = logging.get_logger(__name__)

__UpperCAmelCase = {
    'snap-research/efficientformer-l1-300': (
        'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
    ),
}


class lowerCamelCase (_snake_case ):
    '''simple docstring'''

    _snake_case : Optional[int] = '''efficientformer'''

    def __init__(
        self ,
        _UpperCamelCase = [3, 2, 6, 4] ,
        _UpperCamelCase = [4_8, 9_6, 2_2_4, 4_4_8] ,
        _UpperCamelCase = [True, True, True, True] ,
        _UpperCamelCase = 4_4_8 ,
        _UpperCamelCase = 3_2 ,
        _UpperCamelCase = 4 ,
        _UpperCamelCase = 7 ,
        _UpperCamelCase = 5 ,
        _UpperCamelCase = 8 ,
        _UpperCamelCase = 4 ,
        _UpperCamelCase = 0.0 ,
        _UpperCamelCase = 1_6 ,
        _UpperCamelCase = 3 ,
        _UpperCamelCase = 3 ,
        _UpperCamelCase = 3 ,
        _UpperCamelCase = 2 ,
        _UpperCamelCase = 1 ,
        _UpperCamelCase = 0.0 ,
        _UpperCamelCase = 1 ,
        _UpperCamelCase = True ,
        _UpperCamelCase = True ,
        _UpperCamelCase = 1E-5 ,
        _UpperCamelCase = "gelu" ,
        _UpperCamelCase = 0.02 ,
        _UpperCamelCase = 1E-12 ,
        _UpperCamelCase = 2_2_4 ,
        _UpperCamelCase = 1E-05 ,
        **_UpperCamelCase ,
    ) -> None:
        super().__init__(**_UpperCamelCase )
        UpperCAmelCase_ : int = hidden_act
        UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
        UpperCAmelCase_ : Tuple = hidden_sizes
        UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
        UpperCAmelCase_ : List[str] = num_attention_heads
        UpperCAmelCase_ : List[Any] = initializer_range
        UpperCAmelCase_ : int = layer_norm_eps
        UpperCAmelCase_ : List[str] = patch_size
        UpperCAmelCase_ : Union[str, Any] = num_channels
        UpperCAmelCase_ : Optional[Any] = depths
        UpperCAmelCase_ : List[Any] = mlp_expansion_ratio
        UpperCAmelCase_ : List[str] = downsamples
        UpperCAmelCase_ : List[Any] = dim
        UpperCAmelCase_ : Tuple = key_dim
        UpperCAmelCase_ : Optional[int] = attention_ratio
        UpperCAmelCase_ : str = resolution
        UpperCAmelCase_ : Dict = pool_size
        UpperCAmelCase_ : Union[str, Any] = downsample_patch_size
        UpperCAmelCase_ : List[str] = downsample_stride
        UpperCAmelCase_ : List[str] = downsample_pad
        UpperCAmelCase_ : Any = drop_path_rate
        UpperCAmelCase_ : Dict = num_metaad_blocks
        UpperCAmelCase_ : Dict = distillation
        UpperCAmelCase_ : int = use_layer_scale
        UpperCAmelCase_ : Any = layer_scale_init_value
        UpperCAmelCase_ : Any = image_size
        UpperCAmelCase_ : Dict = batch_norm_eps
29
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)


__UpperCAmelCase = {
    'configuration_speecht5': [
        'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
        'SpeechT5Config',
        'SpeechT5HifiGanConfig',
    ],
    'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
    'processing_speecht5': ['SpeechT5Processor'],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = ['SpeechT5Tokenizer']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = [
        'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SpeechT5ForSpeechToText',
        'SpeechT5ForSpeechToSpeech',
        'SpeechT5ForTextToSpeech',
        'SpeechT5Model',
        'SpeechT5PreTrainedModel',
        'SpeechT5HifiGan',
    ]

if TYPE_CHECKING:
    from .configuration_speechta import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechTaConfig,
        SpeechTaHifiGanConfig,
    )
    from .feature_extraction_speechta import SpeechTaFeatureExtractor
    from .processing_speechta import SpeechTaProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speechta import SpeechTaTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speechta import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechTaForSpeechToSpeech,
            SpeechTaForSpeechToText,
            SpeechTaForTextToSpeech,
            SpeechTaHifiGan,
            SpeechTaModel,
            SpeechTaPreTrainedModel,
        )

else:
    import sys

    __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
29
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n' @dataclass class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Union[PIL.Image.Image, np.ndarray] class lowerCamelCase (_snake_case ): '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any: super().__init__() self.register_modules( prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]: if latents is None: UpperCAmelCase_ : str = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase ) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" ) UpperCAmelCase_ : Tuple = latents.to(_UpperCamelCase ) UpperCAmelCase_ : Tuple = latents * scheduler.init_noise_sigma return latents def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) UpperCAmelCase_ : int = torch.device(f"cuda:{gpu_id}" ) UpperCAmelCase_ : int = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_UpperCamelCase , _UpperCamelCase ) @property def __UpperCAmelCase ( self ) -> int: if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_UpperCamelCase , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> str: if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ): UpperCAmelCase_ : int = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else 
torch.stack(_UpperCamelCase , axis=0 ) if not isinstance(_UpperCamelCase , torch.Tensor ): UpperCAmelCase_ : Optional[int] = self.image_processor(_UpperCamelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 ) UpperCAmelCase_ : Tuple = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = self.image_encoder(_UpperCamelCase )['last_hidden_state'] UpperCAmelCase_ : Union[str, Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 UpperCAmelCase_ : List[str] = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase_ : Dict = torch.zeros_like(_UpperCamelCase ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase_ : Optional[int] = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_UpperCamelCase ) def __call__( self , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = 2_5 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 4.0 , _UpperCamelCase = 6_4 , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> Union[str, Any]: if isinstance(_UpperCamelCase , PIL.Image.Image ): UpperCAmelCase_ : Tuple = 1 elif isinstance(_UpperCamelCase , torch.Tensor ): UpperCAmelCase_ : str = image.shape[0] elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): UpperCAmelCase_ : Optional[int] = len(_UpperCamelCase ) else: raise ValueError( f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}" ) UpperCAmelCase_ : Tuple = self._execution_device UpperCAmelCase_ : str = batch_size * num_images_per_prompt UpperCAmelCase_ : str = guidance_scale > 1.0 UpperCAmelCase_ : str = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # prior self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase ) UpperCAmelCase_ : int = self.scheduler.timesteps UpperCAmelCase_ : int = self.prior.config.num_embeddings UpperCAmelCase_ : Any = self.prior.config.embedding_dim UpperCAmelCase_ : List[str] = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim UpperCAmelCase_ : List[Any] = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase ) for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase_ : Optional[Any] = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : int = self.prior( _UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding # remove the variance UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 ) UpperCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred 
- noise_pred_uncond) UpperCAmelCase_ : List[str] = self.scheduler.step( _UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=_UpperCamelCase ) UpperCAmelCase_ : List[Any] = [] for i, latent in enumerate(_UpperCamelCase ): print() UpperCAmelCase_ : List[str] = self.renderer.decode( latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , ) images.append(_UpperCamelCase ) UpperCAmelCase_ : Optional[int] = torch.stack(_UpperCamelCase ) if output_type not in ["np", "pil"]: raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" ) UpperCAmelCase_ : Dict = images.cpu().numpy() if output_type == "pil": UpperCAmelCase_ : List[str] = [self.numpy_to_pil(_UpperCamelCase ) for image in images] # Offload last model to CPU if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_UpperCamelCase )
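The `noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)` step above is standard classifier-free guidance. A minimal sketch, assuming (as the pipeline does) that the unconditional and conditional inputs were run as one concatenated batch; all names here are hypothetical:

import torch


def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # First half of the batch is the unconditional prediction, second half the
    # conditional one, matching the torch.cat([negative, positive]) above.
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)


pred = torch.cat([torch.zeros(1, 4), torch.ones(1, 4)])  # toy uncond/cond pair
print(apply_cfg(pred, 3.0))  # 0 + 3.0 * (1 - 0) = 3.0 everywhere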
29
1
from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def lowercase__ ( __snake_case : Namespace ): '''simple docstring''' return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) __UpperCAmelCase = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n' class lowerCamelCase (_snake_case ): '''simple docstring''' @staticmethod def __UpperCAmelCase ( _UpperCamelCase ) -> Tuple: UpperCAmelCase_ : Optional[Any] = parser.add_parser( 'convert' , help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.' , ) train_parser.add_argument('--model_type' , type=_UpperCamelCase , required=_UpperCamelCase , help='Model\'s type.' ) train_parser.add_argument( '--tf_checkpoint' , type=_UpperCamelCase , required=_UpperCamelCase , help='TensorFlow checkpoint path or folder.' ) train_parser.add_argument( '--pytorch_dump_output' , type=_UpperCamelCase , required=_UpperCamelCase , help='Path to the PyTorch saved model output.' ) train_parser.add_argument('--config' , type=_UpperCamelCase , default='' , help='Configuration file path or folder.' ) train_parser.add_argument( '--finetuning_task_name' , type=_UpperCamelCase , default=_UpperCamelCase , help='Optional fine-tuning task name if the TF model was a finetuned model.' , ) train_parser.set_defaults(func=_UpperCamelCase ) def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase , ) -> Union[str, Any]: UpperCAmelCase_ : Dict = logging.get_logger('transformers-cli/converting' ) self._logger.info(f"Loading model {model_type}" ) UpperCAmelCase_ : Dict = model_type UpperCAmelCase_ : Optional[int] = tf_checkpoint UpperCAmelCase_ : Tuple = pytorch_dump_output UpperCAmelCase_ : int = config UpperCAmelCase_ : Optional[int] = finetuning_task_name def __UpperCAmelCase ( self ) -> Optional[Any]: if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(_UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) 
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_UpperCamelCase ) if "ckpt" in self._tf_checkpoint.lower(): UpperCAmelCase_ : Union[str, Any] = self._tf_checkpoint UpperCAmelCase_ : Tuple = '' else: UpperCAmelCase_ : List[Any] = self._tf_checkpoint UpperCAmelCase_ : str = '' convert_transfo_xl_checkpoint_to_pytorch( _UpperCamelCase , self._config , self._pytorch_dump_output , _UpperCamelCase ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_UpperCamelCase ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_UpperCamelCase ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( '--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]' )
29
import random
import unittest

import torch

from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
    '''simple docstring'''

    _snake_case : Union[str, Any] = IFImgaImgSuperResolutionPipeline
    _snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
    _snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
    _snake_case : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def __UpperCAmelCase ( self ) -> Optional[Any]:
        return self._get_superresolution_dummy_components()

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Any:
        if str(_UpperCamelCase ).startswith('mps' ):
            UpperCAmelCase_ : List[Any] = torch.manual_seed(_UpperCamelCase )
        else:
            UpperCAmelCase_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
        UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
        UpperCAmelCase_ : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
        UpperCAmelCase_ : Tuple = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() ,
        reason='XFormers attention is only available with CUDA and `xformers` installed' ,
    )
    def __UpperCAmelCase ( self ) -> Any:
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )

    def __UpperCAmelCase ( self ) -> Dict:
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def __UpperCAmelCase ( self ) -> str:
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    def __UpperCAmelCase ( self ) -> List[Any]:
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def __UpperCAmelCase ( self ) -> Union[str, Any]:
        self._test_save_load_local()

    def __UpperCAmelCase ( self ) -> Dict:
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 ,
        )
29
1
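The conversion command above is essentially a dispatch table from `--model_type` to a converter callable. A minimal sketch of the same pattern (illustrative only: the converter placeholders below are assumptions, not the upstream transformers API, whose converters live in `transformers.models.<name>.convert_*_original_tf_checkpoint_to_pytorch`):

def convert_checkpoint(model_type: str, tf_checkpoint: str, config: str, dump_output: str) -> None:
    # Placeholder converters; the real ones are imported lazily per branch above.
    def _not_implemented(*args):
        raise NotImplementedError("install the matching extra and import the real converter")

    converters = {
        "gpt2": _not_implemented,
        "xlnet": _not_implemented,
    }
    try:
        converter = converters[model_type]
    except KeyError:
        # Mirrors the final `else: raise ValueError(...)` branch above.
        raise ValueError(f"--model_type should be one of {sorted(converters)}")
    converter(tf_checkpoint, config, dump_output)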
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__UpperCAmelCase = logging.get_logger(__name__)

__UpperCAmelCase = {
    'microsoft/xprophetnet-large-wiki100-cased': (
        'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
    ),
}


class lowerCamelCase (_snake_case ):
    '''simple docstring'''

    _snake_case : Tuple = '''xlm-prophetnet'''
    _snake_case : Dict = ['''past_key_values''']
    _snake_case : List[str] = {
        '''num_attention_heads''': '''num_encoder_attention_heads''',
    }

    def __init__(
        self ,
        _UpperCamelCase = 0.1 ,
        _UpperCamelCase = "gelu" ,
        _UpperCamelCase = 3_0_5_2_2 ,
        _UpperCamelCase = 1_0_2_4 ,
        _UpperCamelCase = 4_0_9_6 ,
        _UpperCamelCase = 1_2 ,
        _UpperCamelCase = 1_6 ,
        _UpperCamelCase = 4_0_9_6 ,
        _UpperCamelCase = 1_2 ,
        _UpperCamelCase = 1_6 ,
        _UpperCamelCase = 0.1 ,
        _UpperCamelCase = 0.1 ,
        _UpperCamelCase = 5_1_2 ,
        _UpperCamelCase = 0.02 ,
        _UpperCamelCase = True ,
        _UpperCamelCase = True ,
        _UpperCamelCase = 0 ,
        _UpperCamelCase = 2 ,
        _UpperCamelCase = 3_2 ,
        _UpperCamelCase = 1_2_8 ,
        _UpperCamelCase = False ,
        _UpperCamelCase = 0.0 ,
        _UpperCamelCase = True ,
        _UpperCamelCase = 0 ,
        _UpperCamelCase = 1 ,
        _UpperCamelCase = 2 ,
        **_UpperCamelCase ,
    ) -> Tuple:
        UpperCAmelCase_ : Tuple = vocab_size
        UpperCAmelCase_ : Any = hidden_size
        UpperCAmelCase_ : Union[str, Any] = encoder_ffn_dim
        UpperCAmelCase_ : Optional[Any] = num_encoder_layers
        UpperCAmelCase_ : Optional[int] = num_encoder_attention_heads
        UpperCAmelCase_ : Union[str, Any] = decoder_ffn_dim
        UpperCAmelCase_ : List[Any] = num_decoder_layers
        UpperCAmelCase_ : Optional[int] = num_decoder_attention_heads
        UpperCAmelCase_ : Tuple = max_position_embeddings
        UpperCAmelCase_ : int = init_std  # Normal(0, this parameter)
        UpperCAmelCase_ : Optional[int] = activation_function

        # parameters for xlmprophetnet
        UpperCAmelCase_ : List[Any] = ngram
        UpperCAmelCase_ : Tuple = num_buckets
        UpperCAmelCase_ : Optional[Any] = relative_max_distance
        UpperCAmelCase_ : Any = disable_ngram_loss
        UpperCAmelCase_ : Optional[Any] = eps

        # 3 Types of Dropout
        UpperCAmelCase_ : Any = attention_dropout
        UpperCAmelCase_ : Tuple = activation_dropout
        UpperCAmelCase_ : Dict = dropout

        UpperCAmelCase_ : Dict = use_cache

        super().__init__(
            pad_token_id=_UpperCamelCase ,
            bos_token_id=_UpperCamelCase ,
            eos_token_id=_UpperCamelCase ,
            is_encoder_decoder=_UpperCamelCase ,
            add_cross_attention=_UpperCamelCase ,
            decoder_start_token_id=_UpperCamelCase ,
            **_UpperCamelCase ,
        )

    @property
    def __UpperCAmelCase ( self ) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def __UpperCAmelCase ( self , _UpperCamelCase ) -> int:
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
            ' `num_decoder_layers`.' )
29
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__UpperCAmelCase = {
    'configuration_time_series_transformer': [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TimeSeriesTransformerConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimeSeriesTransformerForPrediction',
        'TimeSeriesTransformerModel',
        'TimeSeriesTransformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
29
1
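One detail worth noting in the configuration above: `num_hidden_layers` is a read-only property derived from the encoder and decoder depths, and its setter deliberately raises. A small standalone sketch of that pattern (this is not the transformers class, just the idea):

class EncoderDecoderConfig:
    def __init__(self, num_encoder_layers: int = 12, num_decoder_layers: int = 12):
        self.num_encoder_layers = num_encoder_layers
        self.num_decoder_layers = num_decoder_layers

    @property
    def num_hidden_layers(self) -> int:
        # Derived, never stored: always consistent with the two depths.
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError("Set num_encoder_layers / num_decoder_layers instead.")


config = EncoderDecoderConfig()
assert config.num_hidden_layers == 24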
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__UpperCAmelCase = logging.get_logger(__name__)

__UpperCAmelCase = {
    'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
    'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
    'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
    'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
    'funnel-transformer/intermediate': (
        'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
    ),
    'funnel-transformer/intermediate-base': (
        'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
    ),
    'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
    'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
    'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
    'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}


class lowerCamelCase (_snake_case ):
    '''simple docstring'''

    _snake_case : Union[str, Any] = '''funnel'''
    _snake_case : int = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
    }

    def __init__(
        self ,
        _UpperCamelCase=3_0_5_2_2 ,
        _UpperCamelCase=[4, 4, 4] ,
        _UpperCamelCase=None ,
        _UpperCamelCase=2 ,
        _UpperCamelCase=7_6_8 ,
        _UpperCamelCase=1_2 ,
        _UpperCamelCase=6_4 ,
        _UpperCamelCase=3_0_7_2 ,
        _UpperCamelCase="gelu_new" ,
        _UpperCamelCase=0.1 ,
        _UpperCamelCase=0.1 ,
        _UpperCamelCase=0.0 ,
        _UpperCamelCase=0.1 ,
        _UpperCamelCase=None ,
        _UpperCamelCase=1E-9 ,
        _UpperCamelCase="mean" ,
        _UpperCamelCase="relative_shift" ,
        _UpperCamelCase=True ,
        _UpperCamelCase=True ,
        _UpperCamelCase=True ,
        **_UpperCamelCase ,
    ) -> Optional[Any]:
        UpperCAmelCase_ : Optional[Any] = vocab_size
        UpperCAmelCase_ : Union[str, Any] = block_sizes
        UpperCAmelCase_ : Tuple = [1] * len(_UpperCamelCase ) if block_repeats is None else block_repeats
        assert len(_UpperCamelCase ) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        UpperCAmelCase_ : List[Any] = num_decoder_layers
        UpperCAmelCase_ : Union[str, Any] = d_model
        UpperCAmelCase_ : Any = n_head
        UpperCAmelCase_ : List[str] = d_head
        UpperCAmelCase_ : Dict = d_inner
        UpperCAmelCase_ : Dict = hidden_act
        UpperCAmelCase_ : List[str] = hidden_dropout
        UpperCAmelCase_ : Optional[int] = attention_dropout
        UpperCAmelCase_ : Any = activation_dropout
        UpperCAmelCase_ : str = initializer_range
        UpperCAmelCase_ : Dict = initializer_std
        UpperCAmelCase_ : Optional[Any] = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        UpperCAmelCase_ : Optional[int] = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        UpperCAmelCase_ : List[Any] = attention_type
        UpperCAmelCase_ : Optional[Any] = separate_cls
        UpperCAmelCase_ : int = truncate_seq
        UpperCAmelCase_ : str = pool_q_only

        super().__init__(**_UpperCamelCase )

    @property
    def __UpperCAmelCase ( self ) -> List[str]:
        return sum(self.block_sizes )

    @num_hidden_layers.setter
    def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )

    @property
    def __UpperCAmelCase ( self ) -> Tuple:
        return len(self.block_sizes )

    @num_blocks.setter
    def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[str]:
        raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
29
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


__UpperCAmelCase = logging.get_logger(__name__)

__UpperCAmelCase = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.inta,
    'tensor(uint8)': np.uinta,
    'tensor(int16)': np.intaa,
    'tensor(uint16)': np.uintaa,
    'tensor(int32)': np.intaa,
    'tensor(uint32)': np.uintaa,
    'tensor(int64)': np.intaa,
    'tensor(uint64)': np.uintaa,
    'tensor(float16)': np.floataa,
    'tensor(float)': np.floataa,
    'tensor(double)': np.floataa,
}


class lowerCamelCase :
    '''simple docstring'''

    def __init__( self , _UpperCamelCase=None , **_UpperCamelCase ) -> Dict:
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
        UpperCAmelCase_ : Any = model
        UpperCAmelCase_ : int = kwargs.get('model_save_dir' , _UpperCamelCase )
        UpperCAmelCase_ : List[Any] = kwargs.get('latest_model_name' , _UpperCamelCase )

    def __call__( self , **_UpperCamelCase ) -> str:
        UpperCAmelCase_ : Optional[int] = {k: np.array(_UpperCamelCase ) for k, v in kwargs.items()}
        return self.model.run(_UpperCamelCase , _UpperCamelCase )

    @staticmethod
    def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ) -> List[Any]:
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
            UpperCAmelCase_ : List[str] = 'CPUExecutionProvider'

        return ort.InferenceSession(_UpperCamelCase , providers=[provider] , sess_options=_UpperCamelCase )

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> Dict:
        UpperCAmelCase_ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(self.latest_model_name )
        UpperCAmelCase_ : str = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
        try:
            shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(_UpperCamelCase )
        if src_path.exists():
            UpperCAmelCase_ : List[Any] = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
            try:
                shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
            except shutil.SameFileError:
                pass

    def __UpperCAmelCase ( self , _UpperCamelCase , **_UpperCamelCase , ) -> List[str]:
        if os.path.isfile(_UpperCamelCase ):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file" )
            return

        os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )

        # saving model weights/files
        self._save_pretrained(_UpperCamelCase , **_UpperCamelCase )

    @classmethod
    def __UpperCAmelCase (
        cls ,
        _UpperCamelCase ,
        _UpperCamelCase = None ,
        _UpperCamelCase = None ,
        _UpperCamelCase = False ,
        _UpperCamelCase = None ,
        _UpperCamelCase = None ,
        _UpperCamelCase = None ,
        _UpperCamelCase = None ,
        **_UpperCamelCase ,
    ) -> List[str]:
        UpperCAmelCase_ : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(_UpperCamelCase ):
            UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(
                os.path.join(_UpperCamelCase , _UpperCamelCase ) , provider=_UpperCamelCase , sess_options=_UpperCamelCase )
            UpperCAmelCase_ : Tuple = Path(_UpperCamelCase )
        # load model from hub
        else:
            # download model
            UpperCAmelCase_ : List[str] = hf_hub_download(
                repo_id=_UpperCamelCase ,
                filename=_UpperCamelCase ,
                use_auth_token=_UpperCamelCase ,
                revision=_UpperCamelCase ,
                cache_dir=_UpperCamelCase ,
                force_download=_UpperCamelCase ,
            )
            UpperCAmelCase_ : Union[str, Any] = Path(_UpperCamelCase ).parent
            UpperCAmelCase_ : List[str] = Path(_UpperCamelCase ).name
            UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(_UpperCamelCase , provider=_UpperCamelCase , sess_options=_UpperCamelCase )

        return cls(model=_UpperCamelCase , **_UpperCamelCase )

    @classmethod
    def __UpperCAmelCase (
        cls ,
        _UpperCamelCase ,
        _UpperCamelCase = True ,
        _UpperCamelCase = None ,
        _UpperCamelCase = None ,
        **_UpperCamelCase ,
    ) -> Optional[int]:
        UpperCAmelCase_ : List[str] = None
        if len(str(_UpperCamelCase ).split('@' ) ) == 2:
            UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model_id.split('@' )

        return cls._from_pretrained(
            model_id=_UpperCamelCase ,
            revision=_UpperCamelCase ,
            cache_dir=_UpperCamelCase ,
            force_download=_UpperCamelCase ,
            use_auth_token=_UpperCamelCase ,
            **_UpperCamelCase ,
        )
29
1
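The Funnel configuration above derives both `num_hidden_layers` (the sum of `block_sizes`) and `num_blocks` (their count) from a single list, with `block_repeats` defaulting to all ones. A small standalone sketch of that arithmetic, using the default values shown above:

block_sizes = [4, 4, 4]                  # layers per block, as in the default config
block_repeats = [1] * len(block_sizes)   # default when block_repeats is None

num_hidden_layers = sum(block_sizes)     # 4 + 4 + 4 = 12
num_blocks = len(block_sizes)            # 3
assert (num_hidden_layers, num_blocks) == (12, 3)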
import inspect
import os
import re

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__UpperCAmelCase = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
__UpperCAmelCase = direct_transformers_import(PATH_TO_TRANSFORMERS)

__UpperCAmelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING

__UpperCAmelCase = {
    # used to compute the property `self.chunk_length`
    'EncodecConfig': ['overlap'],
    # used as `self.bert_model = BertModel(config, ...)`
    'DPRConfig': True,
    # not used in modeling files, but it's an important information
    'FSMTConfig': ['langs'],
    # used internally in the configuration class file
    'GPTNeoConfig': ['attention_types'],
    # used internally in the configuration class file
    'EsmConfig': ['is_folding_model'],
    # used during training (despite we don't have training script for these models yet)
    'Mask2FormerConfig': ['ignore_value'],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    'OneFormerConfig': ['ignore_value', 'norm'],
    # used during preprocessing and collation, see `collating_graphormer.py`
    'GraphormerConfig': ['spatial_pos_max'],
    # used internally in the configuration class file
    'T5Config': ['feed_forward_proj'],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
    'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
    # used internally in the configuration class file
    'LongT5Config': ['feed_forward_proj'],
    # used internally in the configuration class file
    'SwitchTransformersConfig': ['feed_forward_proj'],
    # having default values other than `1e-5` - we can't fix them without breaking
    'BioGptConfig': ['layer_norm_eps'],
    # having default values other than `1e-5` - we can't fix them without breaking
    'GLPNConfig': ['layer_norm_eps'],
    # having default values other than `1e-5` - we can't fix them without breaking
    'SegformerConfig': ['layer_norm_eps'],
    # having default values other than `1e-5` - we can't fix them without breaking
    'CvtConfig': ['layer_norm_eps'],
    # having default values other than `1e-5` - we can't fix them without breaking
    'PerceiverConfig': ['layer_norm_eps'],
    # used internally to calculate the feature size
    'InformerConfig': ['num_static_real_features', 'num_time_features'],
    # used internally to calculate the feature size
    'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
    # used internally to calculate the feature size
    'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
    # used internally to calculate `mlp_dim`
    'SamVisionConfig': ['mlp_ratio'],
    # For (head) training, but so far not implemented
    'ClapAudioConfig': ['num_classes'],
    # Not used, but providing useful information to users
    'SpeechT5HifiGanConfig': ['sampling_rate'],
}

# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
    {
        'CLIPSegConfig': True,
        'DeformableDetrConfig': True,
        'DetaConfig': True,
        'DinatConfig': True,
        'DonutSwinConfig': True,
        'EfficientFormerConfig': True,
        'FSMTConfig': True,
        'JukeboxConfig': True,
        'LayoutLMv2Config': True,
        'MaskFormerSwinConfig': True,
        'MT5Config': True,
        'NatConfig': True,
        'OneFormerConfig': True,
        'PerceiverConfig': True,
        'RagConfig': True,
        'SpeechT5Config': True,
        'SwinConfig': True,
        'Swin2SRConfig': True,
        'Swinv2Config': True,
        'SwitchTransformersConfig': True,
        'TableTransformerConfig': True,
        'TapasConfig': True,
        'TransfoXLConfig': True,
        'UniSpeechConfig': True,
        'UniSpeechSatConfig': True,
        'WavLMConfig': True,
        'WhisperConfig': True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        'JukeboxPriorConfig': True,
        # TODO: @Younes (for `is_decoder`)
        'Pix2StructTextConfig': True,
    }
)


def lowercase__ ( __snake_case : Optional[int] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : Optional[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : str = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f"getattr(config, \"{attribute}\"" in modeling_source
                or f"getattr(self.config, \"{attribute}\"" in modeling_source
            ):
                UpperCAmelCase_ : Dict = True
            # Deal with multi-line cases
            elif (
                re.search(
                    RF"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" ,
                    __snake_case ,
                )
                is not None
            ):
                UpperCAmelCase_ : Optional[int] = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    UpperCAmelCase_ : List[Any] = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    UpperCAmelCase_ : Union[str, Any] = [
        'bos_index',
        'eos_index',
        'pad_index',
        'unk_index',
        'mask_index',
        'image_size',
        'use_cache',
        'out_features',
        'out_indices',
    ]
    UpperCAmelCase_ : Optional[int] = ['encoder_no_repeat_ngram_size']

    # Special cases to be allowed
    UpperCAmelCase_ : Union[str, Any] = True
    if not attribute_used:
        UpperCAmelCase_ : List[str] = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                UpperCAmelCase_ : Any = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                UpperCAmelCase_ : List[str] = True

            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                UpperCAmelCase_ : List[Any] = True
            elif attribute.endswith('_token_id' ):
                UpperCAmelCase_ : Optional[int] = True

            # configuration class specific cases
            if not case_allowed:
                UpperCAmelCase_ : List[str] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
                UpperCAmelCase_ : Union[str, Any] = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed


def lowercase__ ( __snake_case : Optional[int] ):
    '''simple docstring'''
    UpperCAmelCase_ : Dict = dict(inspect.signature(config_class.__init__ ).parameters )
    UpperCAmelCase_ : str = [x for x in list(signature.keys() ) if x not in ['self', 'kwargs']]
    UpperCAmelCase_ : Dict = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    UpperCAmelCase_ : Union[str, Any] = {}
    if len(config_class.attribute_map ) > 0:
        UpperCAmelCase_ : Tuple = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    UpperCAmelCase_ : Any = inspect.getsourcefile(__snake_case )
    UpperCAmelCase_ : Optional[int] = os.path.dirname(__snake_case )
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    UpperCAmelCase_ : Any = [os.path.join(__snake_case , __snake_case ) for fn in os.listdir(__snake_case ) if fn.startswith('modeling_' )]

    # Get the source code strings
    UpperCAmelCase_ : Any = []
    for path in modeling_paths:
        if os.path.isfile(__snake_case ):
            with open(__snake_case ) as fp:
                modeling_sources.append(fp.read() )

    UpperCAmelCase_ : Any = []
    for config_param, default_value in zip(__snake_case , __snake_case ):
        # `attributes` here is all the variant names for `config_param`
        UpperCAmelCase_ : Dict = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param] )

        if not check_attribute_being_used(__snake_case , __snake_case , __snake_case , __snake_case ):
            unused_attributes.append(attributes[0] )

    return sorted(__snake_case )


def lowercase__ ( ):
    '''simple docstring'''
    UpperCAmelCase_ : Any = {}
    for _config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        UpperCAmelCase_ : Dict = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class ) ,
                lambda __snake_case : inspect.isclass(__snake_case )
                and issubclass(__snake_case , __snake_case )
                and inspect.getmodule(__snake_case ) == inspect.getmodule(_config_class ) ,
            )
        ]
        for config_class in config_classes_in_module:
            UpperCAmelCase_ : int = check_config_attributes_being_used(__snake_case )
            if len(__snake_case ) > 0:
                UpperCAmelCase_ : Optional[Any] = unused_attributes

    if len(__snake_case ) > 0:
        UpperCAmelCase_ : int = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n'
        for name, attributes in configs_with_unused_attributes.items():
            error += F"{name}: {attributes}\n"

        raise ValueError(__snake_case )


if __name__ == "__main__":
    check_config_attributes()
29
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile

import pyarrow as pa
import pyarrow.parquet as pq
import pytest

import datasets
import datasets.config


@pytest.fixture(scope='session' )
def lowercase__ ( ):
    '''simple docstring'''
    UpperCAmelCase_ : Tuple = 10
    UpperCAmelCase_ : Tuple = datasets.Features(
        {
            'tokens': datasets.Sequence(datasets.Value('string' ) ),
            'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
            'answers': datasets.Sequence(
                {
                    'text': datasets.Value('string' ),
                    'answer_start': datasets.Value('int32' ),
                }
            ),
            'id': datasets.Value('int64' ),
        }
    )
    UpperCAmelCase_ : Tuple = datasets.Dataset.from_dict(
        {
            'tokens': [['foo'] * 5] * n,
            'labels': [[1] * 5] * n,
            'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
            'id': list(range(__snake_case ) ),
        } ,
        features=__snake_case ,
    )
    return dataset


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[str] ):
    '''simple docstring'''
    UpperCAmelCase_ : str = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
    dataset.map(cache_file_name=__snake_case )
    return filename


# FILE_CONTENT + files

__UpperCAmelCase = '\\n Text data.\n Second line of data.'


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt'
    UpperCAmelCase_ : Tuple = FILE_CONTENT
    with open(__snake_case , 'w' ) as f:
        f.write(__snake_case )
    return filename


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] ):
    '''simple docstring'''
    import bza

    UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
    UpperCAmelCase_ : str = bytes(__snake_case , 'utf-8' )
    with bza.open(__snake_case , 'wb' ) as f:
        f.write(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
    '''simple docstring'''
    import gzip

    UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
    UpperCAmelCase_ : Dict = bytes(__snake_case , 'utf-8' )
    with gzip.open(__snake_case , 'wb' ) as f:
        f.write(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
    '''simple docstring'''
    if datasets.config.LZ4_AVAILABLE:
        import lza.frame

        UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
        UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
        with lza.frame.open(__snake_case , 'wb' ) as f:
            f.write(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : List[Any] ):
    '''simple docstring'''
    if datasets.config.PY7ZR_AVAILABLE:
        import pyazr

        UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with pyazr.SevenZipFile(__snake_case , 'w' ) as archive:
            archive.write(__snake_case , arcname=os.path.basename(__snake_case ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any] ):
    '''simple docstring'''
    import tarfile

    UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
    with tarfile.TarFile(__snake_case , 'w' ) as f:
        f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
    '''simple docstring'''
    import lzma

    UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
    UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
    with lzma.open(__snake_case , 'wb' ) as f:
        f.write(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ):
    '''simple docstring'''
    import zipfile

    UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
    '''simple docstring'''
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
        UpperCAmelCase_ : List[str] = bytes(__snake_case , 'utf-8' )
        with zstd.open(__snake_case , 'wb' ) as f:
            f.write(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
    '''simple docstring'''
    UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
    UpperCAmelCase_ : List[Any] = textwrap.dedent(
        '\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
    with open(__snake_case , 'w' ) as f:
        f.write(__snake_case )
    return filename


__UpperCAmelCase = [
    {'col_1': '0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': '1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': '2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__UpperCAmelCase = [
    {'col_1': '4', 'col_2': 4, 'col_3': 4.0},
    {'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__UpperCAmelCase = {
    'col_1': ['0', '1', '2', '3'],
    'col_2': [0, 1, 2, 3],
    'col_3': [0.0, 1.0, 2.0, 3.0],
}
__UpperCAmelCase = [
    {'col_3': 0.0, 'col_1': '0', 'col_2': 0},
    {'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__UpperCAmelCase = [
    {'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]


@pytest.fixture(scope='session' )
def lowercase__ ( ):
    '''simple docstring'''
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
    '''simple docstring'''
    UpperCAmelCase_ : List[Any] = datasets.Dataset.from_dict(__snake_case )
    UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
    dataset.map(cache_file_name=__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlitea.connect(__snake_case ) ) as con:
        UpperCAmelCase_ : List[Any] = con.cursor()
        cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
        for item in DATA:
            cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
        con.commit()
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
    with open(__snake_case , 'w' , newline='' ) as f:
        UpperCAmelCase_ : Tuple = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
    with open(__snake_case , 'w' , newline='' ) as f:
        UpperCAmelCase_ : Optional[Any] = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any ):
    '''simple docstring'''
    import bza

    UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
    with open(__snake_case , 'rb' ) as f:
        UpperCAmelCase_ : int = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bza.open(__snake_case , 'wb' ) as f:
        f.write(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
        f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple ):
    '''simple docstring'''
    UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
        f.write(__snake_case , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : int , __snake_case : str ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
        f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
    UpperCAmelCase_ : Dict = pa.schema(
        {
            'col_1': pa.string(),
            'col_2': pa.intaa(),
            'col_3': pa.floataa(),
        }
    )
    with open(__snake_case , 'wb' ) as f:
        UpperCAmelCase_ : List[Any] = pq.ParquetWriter(__snake_case , schema=__snake_case )
        UpperCAmelCase_ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__snake_case ) )] for k in DATA[0]} , schema=__snake_case )
        writer.write_table(__snake_case )
        writer.close()
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
    UpperCAmelCase_ : Optional[int] = {'data': DATA}
    with open(__snake_case , 'w' ) as f:
        json.dump(__snake_case , __snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
    UpperCAmelCase_ : Tuple = {'data': DATA_DICT_OF_LISTS}
    with open(__snake_case , 'w' ) as f:
        json.dump(__snake_case , __snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
    with open(__snake_case , 'w' ) as f:
        for item in DATA:
            f.write(json.dumps(__snake_case ) + '\n' )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
    '''simple docstring'''
    UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
    with open(__snake_case , 'w' ) as f:
        for item in DATA:
            f.write(json.dumps(__snake_case ) + '\n' )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
    '''simple docstring'''
    UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
    with open(__snake_case , 'w' ) as f:
        for item in DATA_312:
            f.write(json.dumps(__snake_case ) + '\n' )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
    with open(__snake_case , 'w' ) as f:
        for item in DATA_STR:
            f.write(json.dumps(__snake_case ) + '\n' )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Dict ):
    '''simple docstring'''
    import gzip

    UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
    with open(__snake_case , 'rb' ) as orig_file:
        with gzip.open(__snake_case , 'wb' ) as zipped_file:
            zipped_file.writelines(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int , __snake_case : Any ):
    '''simple docstring'''
    import gzip

    UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
    with open(__snake_case , 'rb' ) as orig_file:
        with gzip.open(__snake_case , 'wb' ) as zipped_file:
            zipped_file.writelines(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ):
    '''simple docstring'''
    UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
        f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
        f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
    with tarfile.TarFile(__snake_case , 'w' ) as f:
        f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
        f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
    with tarfile.TarFile(__snake_case , 'w' ) as f:
        f.add(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Any = ['0', '1', '2', '3']
    UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
    with open(__snake_case , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3']
    UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
    with open(__snake_case , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple ):
    '''simple docstring'''
    UpperCAmelCase_ : Dict = ['0', '1', '2', '3']
    UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
    with open(__snake_case , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
        f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : str , __snake_case : Any ):
    '''simple docstring'''
    UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
        f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] ):
    '''simple docstring'''
    UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.basename('unsupported.ext' ) )
        f.write(__snake_case , arcname=os.path.basename('unsupported_2.ext' ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
    '''simple docstring'''
    UpperCAmelCase_ : Tuple = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
    UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
    with open(__snake_case , 'w' , encoding='utf-8' ) as f:
        f.write(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( ):
    '''simple docstring'''
    return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )


@pytest.fixture(scope='session' )
def lowercase__ ( ):
    '''simple docstring'''
    return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : List[str] ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
        f.write(__snake_case , arcname=os.path.basename(__snake_case ).replace('.jpg' , '2.jpg' ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data_dir' )

    (data_dir / "subdir").mkdir()
    with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
        f.write('foo\n' * 10 )
    with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
        f.write('bar\n' * 10 )
    # hidden file
    with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
        f.write('bar\n' * 10 )

    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
        f.write('foo\n' * 10 )
    with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
        f.write('bar\n' * 10 )

    return data_dir
29
1
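The attribute-usage check above looks for `config.xxx`, `getattr(config, "xxx", ...)`, or `getattr(self.config, "xxx", ...)` in modeling sources, falling back to a regex for the multi-line `getattr` form. A condensed, runnable sketch of just that detection logic (the function name here is my own; it is not part of the script above):

import re

def attribute_is_used(attribute: str, modeling_source: str) -> bool:
    # Direct `config.xxx` access or a single-line getattr, as in the first branch above.
    if (
        f"config.{attribute}" in modeling_source
        or f'getattr(config, "{attribute}"' in modeling_source
        or f'getattr(self.config, "{attribute}"' in modeling_source
    ):
        return True
    # Multi-line getattr(config, \n "xxx", ...), mirroring the regex above.
    pattern = rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"'
    return re.search(pattern, modeling_source) is not None

assert attribute_is_used("hidden_size", "x = config.hidden_size")
assert attribute_is_used("hidden_size", 'y = getattr(\n    config,\n    "hidden_size", 768)')
assert not attribute_is_used("unused_attr", "x = config.hidden_size")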
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging


logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)


def lowercase__ ( __snake_case : str ):
    '''simple docstring'''
    if "resnet-50" in model_name:
        UpperCAmelCase_ : Union[str, Any] = ResNetConfig.from_pretrained('microsoft/resnet-50' )
    elif "resnet-101" in model_name:
        UpperCAmelCase_ : Dict = ResNetConfig.from_pretrained('microsoft/resnet-101' )
    else:
        raise ValueError('Model name should include either resnet50 or resnet101' )

    UpperCAmelCase_ : List[Any] = DetrConfig(use_timm_backbone=__snake_case , backbone_config=__snake_case )

    # set label attributes
    UpperCAmelCase_ : int = 'panoptic' in model_name
    if is_panoptic:
        UpperCAmelCase_ : Tuple = 250
    else:
        UpperCAmelCase_ : Tuple = 91
        UpperCAmelCase_ : int = 'huggingface/label-files'
        UpperCAmelCase_ : Optional[int] = 'coco-detection-id2label.json'
        UpperCAmelCase_ : List[Any] = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='dataset' ) , 'r' ) )
        UpperCAmelCase_ : int = {int(__snake_case ): v for k, v in idalabel.items()}
        UpperCAmelCase_ : List[Any] = idalabel
        UpperCAmelCase_ : Tuple = {v: k for k, v in idalabel.items()}

    return config, is_panoptic


def lowercase__ ( __snake_case : List[str] ):
    '''simple docstring'''
    UpperCAmelCase_ : List[str] = []

    # stem
    # fmt: off
    rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
    rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
    rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
    rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
    rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
    # stages
    for stage_idx in range(len(config.backbone_config.depths ) ):
        for layer_idx in range(config.backbone_config.depths[stage_idx] ):
            # shortcut
            if layer_idx == 0:
                rename_keys.append(
                    (
                        F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
                        F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
                    )
                )
                rename_keys.append(
                    (
                        F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
                        F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
                    )
                )
                rename_keys.append(
                    (
                        F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
                        F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
                    )
                )
                rename_keys.append(
                    (
                        F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
                        F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
                    )
                )
                rename_keys.append(
                    (
                        F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
                        F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
                    )
                )
            # 3 convs
            for i in range(3 ):
                rename_keys.append(
                    (
                        F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
                        F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
                    )
                )
                rename_keys.append(
                    (
                        F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
                        F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
                    )
                )
                rename_keys.append(
                    (
                        F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
                        F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
                    )
                )
                rename_keys.append(
                    (
                        F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
                        F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
                    )
                )
                rename_keys.append(
                    (
                        F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
                        F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
                    )
                )
    # fmt: on

    for i in range(config.encoder_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (
                F"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
                F"encoder.layers.{i}.self_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias") )
        rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight") )
        rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias") )
        rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight") )
        rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias") )
        rename_keys.append(
            (F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight") )
        rename_keys.append(
            (F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias") )
        rename_keys.append(
            (F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight") )
        rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias") )
        # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
        rename_keys.append(
            (
                F"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
                F"decoder.layers.{i}.self_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias") )
        rename_keys.append(
            (
                F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
                F"decoder.layers.{i}.encoder_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (
                F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
                F"decoder.layers.{i}.encoder_attn.out_proj.bias",
            )
        )
        rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight") )
        rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias") )
        rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight") )
        rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias") )
        rename_keys.append(
            (F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight") )
        rename_keys.append(
            (F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias") )
        rename_keys.append(
            (F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight") )
        rename_keys.append(
            (F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias") )
        rename_keys.append(
            (F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight") )
        rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias") )

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ('input_proj.weight', 'input_projection.weight'),
            ('input_proj.bias', 'input_projection.bias'),
            ('query_embed.weight', 'query_position_embeddings.weight'),
            ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
            ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
            ('class_embed.weight', 'class_labels_classifier.weight'),
            ('class_embed.bias', 'class_labels_classifier.bias'),
            ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
            ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
            ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
            ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
            ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
            ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
        ]
    )

    return rename_keys


def lowercase__ ( __snake_case : List[Any] , __snake_case : Tuple , __snake_case : List[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Union[str, Any] = state_dict.pop(__snake_case )
    UpperCAmelCase_ : int = val


def lowercase__ ( __snake_case : str , __snake_case : Union[str, Any]=False ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[Any] = ''
    if is_panoptic:
        UpperCAmelCase_ : str = 'detr.'

    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        UpperCAmelCase_ : List[str] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
        UpperCAmelCase_ : Optional[int] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        UpperCAmelCase_ : Optional[int] = in_proj_weight[:256, :]
        UpperCAmelCase_ : str = in_proj_bias[:256]
        UpperCAmelCase_ : Tuple = in_proj_weight[256:512, :]
        UpperCAmelCase_ : int = in_proj_bias[256:512]
        UpperCAmelCase_ : Optional[int] = in_proj_weight[-256:, :]
        UpperCAmelCase_ : Optional[Any] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        UpperCAmelCase_ : List[str] = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
        UpperCAmelCase_ : Dict = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        UpperCAmelCase_ : Any = in_proj_weight[:256, :]
        UpperCAmelCase_ : Optional[int] = in_proj_bias[:256]
        UpperCAmelCase_ : List[Any] = in_proj_weight[256:512, :]
        UpperCAmelCase_ : Optional[int] = in_proj_bias[256:512]
        UpperCAmelCase_ : List[str] = in_proj_weight[-256:, :]
        UpperCAmelCase_ : Optional[int] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        UpperCAmelCase_ : Tuple = state_dict.pop(
            F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" )
        UpperCAmelCase_ : Optional[Any] = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        UpperCAmelCase_ : Optional[Any] = in_proj_weight_cross_attn[:256, :]
        UpperCAmelCase_ : Dict = in_proj_bias_cross_attn[:256]
        UpperCAmelCase_ : Any = in_proj_weight_cross_attn[256:512, :]
        UpperCAmelCase_ : Dict = in_proj_bias_cross_attn[256:512]
        UpperCAmelCase_ : List[Any] = in_proj_weight_cross_attn[-256:, :]
        UpperCAmelCase_ : int = in_proj_bias_cross_attn[-256:]


def lowercase__ ( ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    UpperCAmelCase_ : str = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )

    return im


@torch.no_grad()
def lowercase__ ( __snake_case : str , __snake_case : List[Any]=None , __snake_case : Optional[Any]=False ):
    '''simple docstring'''
    UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = get_detr_config(__snake_case )

    # load original model from torch hub
    UpperCAmelCase_ : Optional[int] = {
        'detr-resnet-50': 'detr_resnet50',
        'detr-resnet-101': 'detr_resnet101',
    }
    logger.info(F"Converting model {model_name}..." )
    UpperCAmelCase_ : Tuple = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=__snake_case ).eval()
    UpperCAmelCase_ : Any = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(__snake_case ):
        if is_panoptic:
            UpperCAmelCase_ : int = 'detr.' + src
        rename_key(__snake_case , __snake_case , __snake_case )
    # query, key and value matrices need special treatment
    read_in_q_k_v(__snake_case , is_panoptic=__snake_case )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    UpperCAmelCase_ : str = 'detr.model.' if is_panoptic else 'model.'
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('detr' )
                and not key.startswith('class_labels_classifier' )
                and not key.startswith('bbox_predictor' )
            ):
                UpperCAmelCase_ : List[Any] = state_dict.pop(__snake_case )
                UpperCAmelCase_ : Dict = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                UpperCAmelCase_ : Optional[int] = state_dict.pop(__snake_case )
                UpperCAmelCase_ : Union[str, Any] = val
            elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
                continue
            else:
                UpperCAmelCase_ : List[Any] = state_dict.pop(__snake_case )
                UpperCAmelCase_ : Union[str, Any] = val
        else:
            if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
                UpperCAmelCase_ : Dict = state_dict.pop(__snake_case )
                UpperCAmelCase_ : Union[str, Any] = val
    # finally, create HuggingFace model and load state dict
    UpperCAmelCase_ : List[Any] = DetrForSegmentation(__snake_case ) if is_panoptic else DetrForObjectDetection(__snake_case )
    model.load_state_dict(__snake_case )
    model.eval()

    # verify our conversion on an image
    UpperCAmelCase_ : int = 'coco_panoptic' if is_panoptic else 'coco_detection'
    UpperCAmelCase_ : Dict = DetrImageProcessor(format=__snake_case )

    UpperCAmelCase_ : Any = processor(images=prepare_img() , return_tensors='pt' )
    UpperCAmelCase_ : Optional[int] = encoding['pixel_values']

    UpperCAmelCase_ : int = detr(__snake_case )
    UpperCAmelCase_ : Any = model(__snake_case )

    assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1E-3 )
    assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1E-3 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1E-4 )
    print('Looks ok!' )

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
        Path(__snake_case ).mkdir(exist_ok=__snake_case )
        model.save_pretrained(__snake_case )
        processor.save_pretrained(__snake_case )

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('Uploading PyTorch model and image processor to the hub...' )
        model.push_to_hub(F"nielsr/{model_name}" )
        processor.push_to_hub(F"nielsr/{model_name}" )


if __name__ == "__main__":
    __UpperCAmelCase = argparse.ArgumentParser()

    parser.add_argument(
        '--model_name',
        default='detr-resnet-50',
        type=str,
        choices=['detr-resnet-50', 'detr-resnet-101'],
        help='Name of the DETR model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    __UpperCAmelCase = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
29
from __future__ import annotations


def lowercase__ ( __snake_case : tuple[int, int] , __snake_case : int ):
    '''simple docstring'''
    UpperCAmelCase_ , UpperCAmelCase_ : Tuple = position
    UpperCAmelCase_ : str = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    UpperCAmelCase_ : Optional[Any] = []

    for position in positions:
        UpperCAmelCase_ , UpperCAmelCase_ : Tuple = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(__snake_case )

    return permissible_positions


def lowercase__ ( __snake_case : list[list[int]] ):
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row )


def lowercase__ ( __snake_case : list[list[int]] , __snake_case : tuple[int, int] , __snake_case : int ):
    '''simple docstring'''
    if is_complete(__snake_case ):
        return True

    for position in get_valid_pos(__snake_case , len(__snake_case ) ):
        UpperCAmelCase_ , UpperCAmelCase_ : Any = position

        if board[y][x] == 0:
            UpperCAmelCase_ : Optional[Any] = curr + 1
            if open_knight_tour_helper(__snake_case , __snake_case , curr + 1 ):
                return True
            UpperCAmelCase_ : List[Any] = 0

    return False


def lowercase__ ( __snake_case : int ):
    '''simple docstring'''
    UpperCAmelCase_ : str = [[0 for i in range(__snake_case )] for j in range(__snake_case )]

    for i in range(__snake_case ):
        for j in range(__snake_case ):
            UpperCAmelCase_ : Optional[Any] = 1
            if open_knight_tour_helper(__snake_case , (i, j) , 1 ):
                return board
            UpperCAmelCase_ : List[Any] = 0

    UpperCAmelCase_ : List[str] = F"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(__snake_case )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
29
1
29
def partition(m: int) -> int:
    """Count the partitions of ``m`` into positive integer parts."""
    # memo[n][k] counts the partitions of n whose parts are all at most k + 1
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
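The routine above computes the integer partition function p(m): since memo[n][k] counts partitions of n with parts at most k + 1, the returned memo[m][m - 1] counts all partitions of m. A small check with values that can be enumerated by hand:

# p(5) = 7: 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1
assert partition(5) == 7
assert partition(7) == 15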
29
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
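The `_LazyModule` indirection above defers the heavy framework imports until an attribute is first accessed. A minimal sketch of the user-facing effect, assuming `transformers` is installed:

from transformers import OPTConfig  # the submodule import happens here, on first access

config = OPTConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4, ffn_dim=128)
print(config.hidden_size)  # 64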
29
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # the input is already a dict, a list of dicts, or a generator/dataset
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
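A hedged usage sketch for the pipeline above; `photo.jpg` is a placeholder path, and `dandelin/vilt-b32-finetuned-vqa` is one commonly used VQA checkpoint:

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
answers = vqa(image="photo.jpg", question="What is in the picture?", top_k=2)
print(answers)  # e.g. [{"score": 0.9, "answer": "cat"}, ...]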
29
1
import argparse
import json

import numpy
import torch

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
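A hypothetical invocation of the conversion script above; both paths are placeholders and the script filename is an assumption following the transformers naming convention:

# python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#     --xlm_checkpoint_path ./xlm_checkpoint.pth \
#     --pytorch_dump_folder_path ./converted_xlm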
29
import os


# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """Count how many words in words.txt have a triangular letter-value score."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    wordlist_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(wordlist_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
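A worked instance of the scoring rule above: "SKY" maps to 19 + 11 + 25 = 55, which is the 10th triangular number, so it would be counted:

assert sum(ord(ch) - 64 for ch in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS  # 55 = 10 * 11 / 2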
29
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
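A usage sketch for the tool above; the import location is assumed from the transformers v4.x tools layout, and "photo.jpg" is a placeholder path:

from PIL import Image
from transformers.tools import ImageCaptioningTool  # import location assumed

captioner = ImageCaptioningTool()
print(captioner(Image.open("photo.jpg")))  # PipelineTool instances are callable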
29
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. ``s3://``) from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return whether ``fs`` points somewhere other than the local filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """Rename ``src`` to ``dst`` on the given filesystem."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's event-loop state so a forked process does not reuse a dead loop."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
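A quick behavioral check of the path helper above, following directly from its string logic:

assert extract_path_from_uri("s3://my-bucket/datasets/train") == "my-bucket/datasets/train"
assert extract_path_from_uri("datasets/train") == "datasets/train"  # local paths pass through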
29
1
import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) __UpperCAmelCase = logging.getLogger() def lowercase__ ( __snake_case : Path , __snake_case : list ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = '\n'.join(__snake_case ) Path(__snake_case ).open('w' ).writelines(__snake_case ) __UpperCAmelCase = 'patrickvonplaten/t5-tiny-random' __UpperCAmelCase = 'sshleifer/bart-tiny-random' __UpperCAmelCase = 'sshleifer/tiny-mbart' __UpperCAmelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class lowerCamelCase (_snake_case ): '''simple docstring''' def __UpperCAmelCase ( self , _UpperCamelCase ) -> Union[str, Any]: UpperCAmelCase_ : Dict = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' UpperCAmelCase_ : Optional[int] = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() UpperCAmelCase_ : Tuple = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'] _dump_articles(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Dict = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' ) UpperCAmelCase_ : Optional[Any] = 'translation_en_to_de' if model == T5_TINY else 'summarization' UpperCAmelCase_ : List[Any] = f"\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n ".split() with patch.object(_UpperCamelCase , 'argv' , _UpperCamelCase ): run_generate() assert Path(_UpperCamelCase ).exists() # os.remove(Path(output_file_name)) def __UpperCAmelCase ( self ) -> Dict: self.run_eval_tester(_UpperCamelCase ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]: self.run_eval_tester(_UpperCamelCase ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict: UpperCAmelCase_ : str = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' UpperCAmelCase_ : Tuple = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() UpperCAmelCase_ : List[Any] = { 'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'], 'de': [ 'Maschinelles Lernen ist großartig, oder?', 'Ich esse gerne Bananen', 'Morgen ist wieder ein toller Tag!', ], } UpperCAmelCase_ : List[str] = Path(self.get_auto_remove_tmp_dir() ) UpperCAmelCase_ : Union[str, Any] = str(tmp_dir / 'scores.json' ) UpperCAmelCase_ : Any = str(tmp_dir / 'val.target' ) _dump_articles(_UpperCamelCase , text['en'] ) _dump_articles(_UpperCamelCase , text['de'] ) UpperCAmelCase_ : Any = 'translation_en_to_de' if model == T5_TINY else 'summarization' UpperCAmelCase_ : int = f"\n run_eval_search.py\n {model}\n {str(_UpperCamelCase )}\n {str(_UpperCamelCase )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n ".split() testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] ) with patch.object(_UpperCamelCase , 'argv' , _UpperCamelCase ): with CaptureStdout() as cs: run_search() UpperCAmelCase_ : Tuple = [' num_beams | length_penalty', model, 
'Best score args'] UpperCAmelCase_ : List[Any] = ['Info'] if "translation" in task: expected_strings.append('bleu' ) else: expected_strings.extend(_UpperCamelCase ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(_UpperCamelCase ).exists() os.remove(Path(_UpperCamelCase ) )
29
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place by sweeping alternately backward and forward."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
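A short check of the in-place behavior of the sort above: the same list object comes back sorted.

data = [4, 5, 2, 1, 2]
assert cocktail_shaker_sort(data) == [1, 2, 2, 4, 5]
assert data == [1, 2, 2, 4, 5]  # sorted in place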
29
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
29
from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n' def lowercase__ ( __snake_case : List[str] , __snake_case : int , __snake_case : Tuple=8 ): '''simple docstring''' UpperCAmelCase_ : Dict = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCAmelCase_ : List[Any] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def lowercase__ ( __snake_case : Any , __snake_case : int=512 , __snake_case : Dict=512 ): '''simple docstring''' UpperCAmelCase_ : Tuple = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) UpperCAmelCase_ : Dict = np.array(pil_image.convert('RGB' ) ) UpperCAmelCase_ : Any = arr.astype(np.floataa ) / 127.5 - 1 UpperCAmelCase_ : Dict = np.transpose(__snake_case , [2, 0, 1] ) UpperCAmelCase_ : List[str] = torch.from_numpy(__snake_case ).unsqueeze(0 ) return image class lowerCamelCase (_snake_case ): '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Union[str, Any]: super().__init__() self.register_modules( unet=_UpperCamelCase , scheduler=_UpperCamelCase , movq=_UpperCamelCase , ) UpperCAmelCase_ : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict: # get the original timestep using init_timestep UpperCAmelCase_ : Any = min(int(num_inference_steps * strength ) , _UpperCamelCase ) UpperCAmelCase_ : List[Any] = max(num_inference_steps - init_timestep , 0 ) UpperCAmelCase_ : str = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> Tuple: if not isinstance(_UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is 
{type(_UpperCamelCase )}" ) UpperCAmelCase_ : List[str] = image.to(device=_UpperCamelCase , dtype=_UpperCamelCase ) UpperCAmelCase_ : List[str] = batch_size * num_images_per_prompt if image.shape[1] == 4: UpperCAmelCase_ : List[str] = image else: if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(_UpperCamelCase )}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) elif isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : Any = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCamelCase ) ] UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase , dim=0 ) else: UpperCAmelCase_ : Union[str, Any] = self.movq.encode(_UpperCamelCase ).latent_dist.sample(_UpperCamelCase ) UpperCAmelCase_ : int = self.movq.config.scaling_factor * init_latents UpperCAmelCase_ : Optional[int] = torch.cat([init_latents] , dim=0 ) UpperCAmelCase_ : Tuple = init_latents.shape UpperCAmelCase_ : List[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase ) # get latents UpperCAmelCase_ : str = self.scheduler.add_noise(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = init_latents return latents def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Any: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) UpperCAmelCase_ : Optional[Any] = torch.device(f"cuda:{gpu_id}" ) UpperCAmelCase_ : Optional[Any] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]: if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' ) UpperCAmelCase_ : str = torch.device(f"cuda:{gpu_id}" ) if self.device.type != "cpu": self.to('cpu' , silence_dtype_warnings=_UpperCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase_ : Dict = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCAmelCase_ , UpperCAmelCase_ : Dict = cpu_offload_with_hook(_UpperCamelCase , _UpperCamelCase , prev_module_hook=_UpperCamelCase ) # We'll offload the last model manually. 
UpperCAmelCase_ : Any = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __UpperCAmelCase ( self ) -> Dict: if not hasattr(self.unet , '_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(_UpperCamelCase , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_UpperCamelCase ) def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 4.0 , _UpperCamelCase = 0.3 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> str: UpperCAmelCase_ : Any = self._execution_device UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0 if isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : str = torch.cat(_UpperCamelCase , dim=0 ) UpperCAmelCase_ : Optional[Any] = image_embeds.shape[0] if isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase_ : int = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 ) UpperCAmelCase_ : int = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 ) UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase ) if not isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : Tuple = [image] if not all(isinstance(_UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( f"Input is in incorrect format: {[type(_UpperCamelCase ) for i in image]}. 
Currently, we only support PIL image and pytorch tensor" ) UpperCAmelCase_ : str = torch.cat([prepare_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for i in image] , dim=0 ) UpperCAmelCase_ : Any = image.to(dtype=image_embeds.dtype , device=_UpperCamelCase ) UpperCAmelCase_ : List[str] = self.movq.encode(_UpperCamelCase )['latents'] UpperCAmelCase_ : List[Any] = latents.repeat_interleave(_UpperCamelCase , dim=0 ) self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt ) UpperCAmelCase_ , UpperCAmelCase_ : str = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor ) UpperCAmelCase_ : Dict = self.prepare_latents( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase ) for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase_ : str = {'image_embeds': image_embeds} UpperCAmelCase_ : Union[str, Any] = self.unet( sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0] if do_classifier_free_guidance: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 ) UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 ) UpperCAmelCase_ , UpperCAmelCase_ : str = variance_pred.chunk(2 ) UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , 'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): UpperCAmelCase_ , UpperCAmelCase_ : int = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ : List[str] = self.scheduler.step( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0] # post-processing UpperCAmelCase_ : Optional[Any] = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" ) if output_type in ["np", "pil"]: UpperCAmelCase_ : List[str] = image * 0.5 + 0.5 UpperCAmelCase_ : List[Any] = image.clamp(0 , 1 ) UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase_ : List[Any] = self.numpy_to_pil(_UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_UpperCamelCase )
29
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
29
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def lowercase__ ( __snake_case : List[Any] , __snake_case : List[str]=False ): '''simple docstring''' try: UpperCAmelCase_ : int = os.environ[key] except KeyError: # KEY isn't set, default to `default`. UpperCAmelCase_ : Optional[int] = default else: # KEY is set, convert it to True or False. try: UpperCAmelCase_ : List[Any] = strtobool(__snake_case ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"If set, {key} must be yes or no." ) return _value __UpperCAmelCase = parse_flag_from_env('RUN_SLOW', default=False) def lowercase__ ( __snake_case : int ): '''simple docstring''' return unittest.skip('Test was skipped' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__snake_case ) def lowercase__ ( __snake_case : List[str] ): '''simple docstring''' return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__snake_case ) def lowercase__ ( __snake_case : List[str] ): '''simple docstring''' return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(__snake_case ) def lowercase__ ( __snake_case : str ): '''simple docstring''' return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__snake_case ) def lowercase__ ( __snake_case : str ): '''simple docstring''' return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__snake_case ) def lowercase__ ( __snake_case : Dict ): '''simple docstring''' return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__snake_case ) def lowercase__ ( __snake_case : Dict ): '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(__snake_case ) def lowercase__ ( __snake_case : Optional[int] ): '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__snake_case ) def lowercase__ ( __snake_case : int ): '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__snake_case ) def lowercase__ ( __snake_case : Dict ): '''simple docstring''' return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return 
unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(__snake_case ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__snake_case ) def lowercase__ ( __snake_case : Dict=None , __snake_case : Dict=None ): '''simple docstring''' if test_case is None: return partial(__snake_case , version=__snake_case ) return unittest.skipUnless(is_torch_version('>=' , __snake_case ) , F"test requires torch version >= {version}" )(__snake_case ) def lowercase__ ( __snake_case : str ): '''simple docstring''' return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__snake_case ) def lowercase__ ( __snake_case : List[str] ): '''simple docstring''' return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__snake_case ) def lowercase__ ( __snake_case : str ): '''simple docstring''' return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__snake_case ) __UpperCAmelCase = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__snake_case ) class lowerCamelCase (unittest.TestCase ): '''simple docstring''' _snake_case : Union[str, Any] = True @classmethod def __UpperCAmelCase ( cls ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = tempfile.mkdtemp() @classmethod def __UpperCAmelCase ( cls ) -> List[str]: if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def __UpperCAmelCase ( self ) -> str: if self.clear_on_setup: for path in Path(self.tmpdir ).glob('**/*' ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(_UpperCamelCase ) class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ) -> Optional[int]: super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any: UpperCAmelCase_ : List[Any] = mocks if isinstance(_UpperCamelCase , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def lowercase__ ( __snake_case : int ): '''simple docstring''' UpperCAmelCase_ : int = AcceleratorState() UpperCAmelCase_ : str = tensor[None].clone().to(state.device ) UpperCAmelCase_ : List[str] = gather(__snake_case ).cpu() UpperCAmelCase_ : List[Any] = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , __snake_case ): return False return True class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any: UpperCAmelCase_ : str = returncode UpperCAmelCase_ : Optional[Any] = stdout UpperCAmelCase_ : Optional[Any] = stderr async def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Optional[int] ): '''simple docstring''' while True: UpperCAmelCase_ : Dict = await stream.readline() if line: callback(__snake_case ) else: break async def lowercase__ ( __snake_case : Optional[int] , __snake_case : Dict=None , __snake_case : str=None , __snake_case : Dict=None , __snake_case : List[str]=False , __snake_case : Optional[int]=False ): '''simple docstring''' if echo: print('\nRunning: ' , ' '.join(__snake_case ) ) UpperCAmelCase_ : Optional[Any] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__snake_case , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__snake_case , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) UpperCAmelCase_ : Any = [] UpperCAmelCase_ : str = [] def tee(__snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Optional[int]="" ): UpperCAmelCase_ : List[str] = line.decode('utf-8' ).rstrip() sink.append(__snake_case ) if not quiet: print(__snake_case , __snake_case , file=__snake_case ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda __snake_case : tee(__snake_case , __snake_case , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda __snake_case : tee(__snake_case , __snake_case , sys.stderr , label='stderr:' ) ) ), ] , timeout=__snake_case , ) return _RunOutput(await p.wait() , __snake_case , __snake_case ) def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[Any]=None , __snake_case : str=None , __snake_case : Tuple=180 , __snake_case : Dict=False , __snake_case : Optional[Any]=True ): '''simple docstring''' UpperCAmelCase_ : str = asyncio.get_event_loop() UpperCAmelCase_ : int = loop.run_until_complete( _stream_subprocess(__snake_case , env=__snake_case , stdin=__snake_case , timeout=__snake_case , quiet=__snake_case , echo=__snake_case ) ) UpperCAmelCase_ : int = ' '.join(__snake_case ) if result.returncode > 0: UpperCAmelCase_ : int = '\n'.join(result.stderr ) raise RuntimeError( F"'{cmd_str}' failed with returncode {result.returncode}\n\n" F"The combined stderr from workers follows:\n{stderr}" ) return result class lowerCamelCase (_snake_case ): '''simple docstring''' pass def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any]=False ): '''simple docstring''' try: UpperCAmelCase_ : List[Any] = subprocess.check_output(__snake_case , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(__snake_case , 'decode' ): UpperCAmelCase_ : str = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"Command `{' '.join(__snake_case )}` failed with the following error:\n\n{e.output.decode()}" ) from e
29
1
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase=1_3 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=9_9 , _UpperCamelCase=3_2 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=3_7 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=5_1_2 , _UpperCamelCase=1_6 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=None , ) -> Tuple: UpperCAmelCase_ : Any = parent UpperCAmelCase_ : List[str] = batch_size UpperCAmelCase_ : Optional[Any] = seq_length UpperCAmelCase_ : str = is_training UpperCAmelCase_ : Optional[int] = use_input_mask UpperCAmelCase_ : int = use_token_type_ids UpperCAmelCase_ : List[Any] = use_labels UpperCAmelCase_ : Dict = vocab_size UpperCAmelCase_ : int = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : Any = num_attention_heads UpperCAmelCase_ : List[str] = intermediate_size UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : Any = hidden_dropout_prob UpperCAmelCase_ : Tuple = attention_probs_dropout_prob UpperCAmelCase_ : Union[str, Any] = max_position_embeddings UpperCAmelCase_ : Dict = type_vocab_size UpperCAmelCase_ : List[str] = type_sequence_label_size UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Dict = num_labels UpperCAmelCase_ : Tuple = num_choices UpperCAmelCase_ : int = scope def __UpperCAmelCase ( self ) -> Union[str, Any]: UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : str = None if self.use_input_mask: UpperCAmelCase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : List[Any] = None if self.use_token_type_ids: UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : int = None UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self ) -> Optional[int]: return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , 
is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , use_stable_embedding=_UpperCamelCase , ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple: UpperCAmelCase_ : Dict = OpenLlamaModel(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() UpperCAmelCase_ : Dict = model(_UpperCamelCase , attention_mask=_UpperCamelCase ) UpperCAmelCase_ : Dict = model(_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> List[Any]: UpperCAmelCase_ : Optional[int] = True UpperCAmelCase_ : int = OpenLlamaModel(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() UpperCAmelCase_ : Dict = model( _UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , ) UpperCAmelCase_ : List[Any] = model( _UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , ) UpperCAmelCase_ : Any = model(_UpperCamelCase , attention_mask=_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Optional[Any]: UpperCAmelCase_ : Optional[int] = OpenLlamaForCausalLM(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() UpperCAmelCase_ : List[str] = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> str: UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : str = True UpperCAmelCase_ : List[Any] = OpenLlamaForCausalLM(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() # first forward pass UpperCAmelCase_ : Optional[int] = model( _UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase , ) UpperCAmelCase_ : Tuple = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ : int = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase_ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase_ : Optional[int] = model( _UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , output_hidden_states=_UpperCamelCase , )['hidden_states'][0] UpperCAmelCase_ : str = model( _UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , output_hidden_states=_UpperCamelCase , 
)['hidden_states'][0] # select random slice UpperCAmelCase_ : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase_ : Any = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ) ) def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Any = config_and_inputs UpperCAmelCase_ : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCamelCase (_snake_case , _snake_case , _snake_case , unittest.TestCase ): '''simple docstring''' _snake_case : Optional[Any] = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) _snake_case : Union[str, Any] = (OpenLlamaForCausalLM,) if is_torch_available() else () _snake_case : Union[str, Any] = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) _snake_case : List[str] = False _snake_case : Any = False def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ : Dict = OpenLlamaModelTester(self ) UpperCAmelCase_ : str = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 ) def __UpperCAmelCase ( self ) -> List[str]: self.config_tester.run_common_tests() def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def __UpperCAmelCase ( self ) -> int: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ : Optional[int] = type self.model_tester.create_and_check_model(*_UpperCamelCase ) def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : str = 3 UpperCAmelCase_ : Union[str, Any] = input_dict['input_ids'] UpperCAmelCase_ : int = input_ids.ne(1 ).to(_UpperCamelCase ) UpperCAmelCase_ : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase_ : Optional[int] = OpenLlamaForSequenceClassification(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() UpperCAmelCase_ : str = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Optional[int] = 3 UpperCAmelCase_ : List[Any] = 'single_label_classification' UpperCAmelCase_ : int = input_dict['input_ids'] UpperCAmelCase_ : List[str] = input_ids.ne(1 ).to(_UpperCamelCase ) UpperCAmelCase_ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase_ : Optional[Any] 
= OpenLlamaForSequenceClassification(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() UpperCAmelCase_ : str = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __UpperCAmelCase ( self ) -> Any: UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[str] = 3 UpperCAmelCase_ : List[str] = 'multi_label_classification' UpperCAmelCase_ : Tuple = input_dict['input_ids'] UpperCAmelCase_ : Tuple = input_ids.ne(1 ).to(_UpperCamelCase ) UpperCAmelCase_ : str = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCAmelCase_ : Optional[int] = OpenLlamaForSequenceClassification(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() UpperCAmelCase_ : Union[str, Any] = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' ) def __UpperCAmelCase ( self ) -> Dict: pass @parameterized.expand([('linear',), ('dynamic',)] ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any: UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Any = ids_tensor([1, 1_0] , config.vocab_size ) UpperCAmelCase_ : Dict = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase_ : str = OpenLlamaModel(_UpperCamelCase ) original_model.to(_UpperCamelCase ) original_model.eval() UpperCAmelCase_ : Any = original_model(_UpperCamelCase ).last_hidden_state UpperCAmelCase_ : Optional[Any] = original_model(_UpperCamelCase ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase_ : Any = {'type': scaling_type, 'factor': 10.0} UpperCAmelCase_ : Optional[int] = OpenLlamaModel(_UpperCamelCase ) scaled_model.to(_UpperCamelCase ) scaled_model.eval() UpperCAmelCase_ : Dict = scaled_model(_UpperCamelCase ).last_hidden_state UpperCAmelCase_ : Union[str, Any] = scaled_model(_UpperCamelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-5 ) )
29
import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed __UpperCAmelCase = logging.getLogger(__name__) def lowercase__ ( __snake_case : List[Any]=2 , __snake_case : Union[str, Any]=3 , __snake_case : Any=16 , __snake_case : int = 10 , __snake_case : int = 2 ): '''simple docstring''' def get_dataset(__snake_case : Optional[Any] ): UpperCAmelCase_ : Optional[Any] = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(__snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) UpperCAmelCase_ : Any = get_dataset(__snake_case ) UpperCAmelCase_ : str = get_dataset(__snake_case ) UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 ) UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 ) return (train_dataloader, valid_dataloader) def lowercase__ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple=None ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = [] for epoch in range(__snake_case ): # Train quickly model.train() for batch in dataloader: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = batch UpperCAmelCase_ : List[Any] = model(__snake_case ) UpperCAmelCase_ : int = torch.nn.functional.mse_loss(__snake_case , __snake_case ) accelerator.backward(__snake_case ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class lowerCamelCase (nn.Module ): '''simple docstring''' def __init__( self ) -> Optional[Any]: super().__init__() UpperCAmelCase_ : List[Any] = nn.Parameter(torch.randn(1 ) ) UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn(1 ) ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[Any]: return x * self.a + self.b class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ) -> Dict: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Tuple = DummyModel() UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders() UpperCAmelCase_ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCamelCase , automatic_checkpoint_naming=_UpperCamelCase ) # Train baseline UpperCAmelCase_ : Dict = Accelerator(project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def __UpperCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Optional[Any] = DummyModel() UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders() # Train baseline UpperCAmelCase_ : Tuple = Accelerator() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ 
, UpperCAmelCase_ : Optional[Any] = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' ) accelerator.save_state(_UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item() UpperCAmelCase_ : Dict = optimizer.state_dict() UpperCAmelCase_ : Union[str, Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item() UpperCAmelCase_ : Any = optimizer.state_dict() # Train partially set_seed(4_2 ) UpperCAmelCase_ : int = DummyModel() UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders() UpperCAmelCase_ : Optional[Any] = Accelerator() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) accelerator.load_state(_UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item() UpperCAmelCase_ : Optional[Any] = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save everything UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' ) accelerator.save_state(_UpperCamelCase ) # Load everything back in and make sure all states work accelerator.load_state(_UpperCamelCase ) test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item() UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Tuple = DummyModel() UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders() UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase ) # Train baseline UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial accelerator.save_state() ((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item() UpperCAmelCase_ : Optional[int] = optimizer.state_dict() UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item() UpperCAmelCase_ : Optional[int] = optimizer.state_dict() # Train partially set_seed(4_2 ) UpperCAmelCase_ : Any = DummyModel() UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = 
dummy_dataloaders() UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase ) UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item() UpperCAmelCase_ : List[Any] = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) ) test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item() UpperCAmelCase_ : Dict = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> Dict: UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] ) UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] ) UpperCAmelCase_ : Union[str, Any] = DummyModel() UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() ) UpperCAmelCase_ : Any = Accelerator() with self.assertRaises(_UpperCamelCase ) as ve: accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Optional[int] = str(ve.exception ) self.assertTrue('Item at index 0' in message ) self.assertTrue('Item at index 1' in message ) self.assertFalse('Item at index 2' in message ) self.assertFalse('Item at index 3' in message ) def __UpperCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : int = DummyModel() UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders() UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase ) # Train baseline UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial accelerator.save_state() UpperCAmelCase_ : Dict = scheduler.state_dict() train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) self.assertEqual(_UpperCamelCase , scheduler.state_dict() ) def __UpperCAmelCase ( self ) -> Dict: with 
tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Optional[int] = DummyModel() UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 ) # Train baseline UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase ) # Save 11 states; with total_limit=2 only the last two survive: for _ in range(1_1 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) ) @require_cuda def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : List[str] = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() ) if __name__ == "__main__": __UpperCAmelCase = '/tmp/accelerate/state_checkpointing' __UpperCAmelCase = DummyModel() __UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3) __UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9) __UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders() __UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline __UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no') if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the initial optimizer is loaded on the GPU for group in optimizer.param_groups: __UpperCAmelCase = group['params'][0].device break assert param_device.type == accelerator.device.type __UpperCAmelCase = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu') for group in optimizer.param_groups: __UpperCAmelCase = group['params'][0].device break assert ( param_device.type == torch.device('cpu').type ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device') for group in optimizer.param_groups: __UpperCAmelCase = group['params'][0].device break assert ( param_device.type == accelerator.device.type ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match='Unsupported optimizer map location passed'): accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid') accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
29
1
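# The test file above exercises Accelerate checkpointing (save_state/load_state,
# automatic checkpoint naming, total_limit pruning). A minimal un-obfuscated
# sketch of the same save/load round trip, assuming only the public
# `Accelerator.save_state` / `load_state` API; the "ckpt" directory name is
# illustrative:
import torch

from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state("ckpt")      # writes model, optimizer and RNG states
weight_before = model.weight.item()
with torch.no_grad():
    model.weight.add_(1.0)          # perturb the weights
accelerator.load_state("ckpt")      # restores the saved weights
assert abs(model.weight.item() - weight_before) < 1e-6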
import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase = "cpu" , _UpperCamelCase = "openai/clip-vit-large-patch14" ) -> None: UpperCAmelCase_ : List[Any] = device UpperCAmelCase_ : Any = CLIPTokenizerFast.from_pretrained(_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] UpperCAmelCase_ : Union[str, Any] = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] UpperCAmelCase_ : Dict = torchvision.transforms.Normalize(self.image_mean , self.image_std ) UpperCAmelCase_ : List[str] = torchvision.transforms.Resize(2_2_4 ) UpperCAmelCase_ : Tuple = torchvision.transforms.CenterCrop(2_2_4 ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Tuple: UpperCAmelCase_ : Optional[int] = self.resize(_UpperCamelCase ) UpperCAmelCase_ : Optional[int] = self.center_crop(_UpperCamelCase ) UpperCAmelCase_ : Dict = self.normalize(_UpperCamelCase ) return images def __call__( self , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> int: UpperCAmelCase_ : Union[str, Any] = self.tokenizer(text=_UpperCamelCase , **_UpperCamelCase ) UpperCAmelCase_ : Tuple = self.preprocess_img(_UpperCamelCase ) UpperCAmelCase_ : int = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class lowerCamelCase (nn.Module ): '''simple docstring''' def __init__( self , _UpperCamelCase=1_0 , _UpperCamelCase=0.01 , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase="image" , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=False , ) -> None: super().__init__() UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : Union[str, Any] = device if device else get_device() if vqgan: UpperCAmelCase_ : List[str] = vqgan else: UpperCAmelCase_ : Dict = load_vqgan(self.device , conf_path=_UpperCamelCase , ckpt_path=_UpperCamelCase ) self.vqgan.eval() if clip: UpperCAmelCase_ : str = clip else: UpperCAmelCase_ : Union[str, Any] = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' ) self.clip.to(self.device ) UpperCAmelCase_ : Tuple = ProcessorGradientFlow(device=self.device ) UpperCAmelCase_ : Optional[int] = iterations UpperCAmelCase_ : Dict = lr UpperCAmelCase_ : Union[str, Any] = log UpperCAmelCase_ : Union[str, Any] = make_grid UpperCAmelCase_ : Tuple = return_val UpperCAmelCase_ : List[str] = quantize UpperCAmelCase_ : Tuple = self.vqgan.decoder.z_shape def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=5 , _UpperCamelCase=True ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = [] if output_path is None: UpperCAmelCase_ : List[Any] = './animation.gif' if input_path is None: UpperCAmelCase_ : Dict = self.save_path UpperCAmelCase_ : Dict = sorted(glob(input_path + '/*' ) ) if not len(_UpperCamelCase ): raise ValueError( 'No images found in save path, aborting (did you pass save_intermediate=True to the generate' ' function?)' ) if len(_UpperCamelCase ) == 1: print('Only one image found in save path, (did you pass save_intermediate=True to 
the generate function?)' ) UpperCAmelCase_ : Dict = total_duration / len(_UpperCamelCase ) UpperCAmelCase_ : Tuple = [frame_duration] * len(_UpperCamelCase ) if extend_frames: UpperCAmelCase_ : int = 1.5 UpperCAmelCase_ : str = 3 for file_name in paths: if file_name.endswith('.png' ): images.append(imageio.imread(_UpperCamelCase ) ) imageio.mimsave(_UpperCamelCase , _UpperCamelCase , duration=_UpperCamelCase ) print(f"gif saved to {output_path}" ) def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None ) -> int: if not (path or img): raise ValueError('Input either path or tensor' ) if img is not None: raise NotImplementedError UpperCAmelCase_ : Optional[int] = preprocess(Image.open(_UpperCamelCase ) , target_image_size=2_5_6 ).to(self.device ) UpperCAmelCase_ : Tuple = preprocess_vqgan(_UpperCamelCase ) UpperCAmelCase_ , *UpperCAmelCase_ : List[Any] = self.vqgan.encode(_UpperCamelCase ) return z def __UpperCAmelCase ( self , _UpperCamelCase ) -> Tuple: UpperCAmelCase_ : Optional[Any] = self.latent.detach().requires_grad_() UpperCAmelCase_ : Optional[int] = base_latent + transform_vector if self.quantize: UpperCAmelCase_ , *UpperCAmelCase_ : str = self.vqgan.quantize(_UpperCamelCase ) else: UpperCAmelCase_ : Union[str, Any] = trans_latent return self.vqgan.decode(_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> Optional[Any]: UpperCAmelCase_ : Optional[int] = self.clip_preprocessor(text=_UpperCamelCase , images=_UpperCamelCase , return_tensors='pt' , padding=_UpperCamelCase ) UpperCAmelCase_ : str = self.clip(**_UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = clip_outputs.logits_per_image if weights is not None: UpperCAmelCase_ : Union[str, Any] = similarity_logits * weights return similarity_logits.sum() def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple: UpperCAmelCase_ : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'] , _UpperCamelCase , weights=(1 / pos_prompts['weights']) ) if neg_prompts: UpperCAmelCase_ : Any = self._get_clip_similarity(neg_prompts['prompts'] , _UpperCamelCase , weights=neg_prompts['weights'] ) else: UpperCAmelCase_ : int = torch.tensor([1] , device=self.device ) UpperCAmelCase_ : List[str] = -torch.log(_UpperCamelCase ) + torch.log(_UpperCamelCase ) return loss def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]: UpperCAmelCase_ : str = torch.randn_like(self.latent , requires_grad=_UpperCamelCase , device=self.device ) UpperCAmelCase_ : Union[str, Any] = torch.optim.Adam([vector] , lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() UpperCAmelCase_ : Dict = self._add_vector(_UpperCamelCase ) UpperCAmelCase_ : List[Any] = loop_post_process(_UpperCamelCase ) UpperCAmelCase_ : Any = self._get_CLIP_loss(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) print('CLIP loss' , _UpperCamelCase ) if self.log: wandb.log({'CLIP Loss': clip_loss} ) clip_loss.backward(retain_graph=_UpperCamelCase ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]: wandb.init(reinit=_UpperCamelCase , project='face-editor' ) wandb.config.update({'Positive Prompts': positive_prompts} ) wandb.config.update({'Negative Prompts': negative_prompts} ) wandb.config.update({'lr': self.lr, 'iterations': self.iterations} ) if image_path: 
UpperCAmelCase_ : Dict = Image.open(_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = image.resize((2_5_6, 2_5_6) ) wandb.log({'Original Image': wandb.Image(_UpperCamelCase )} ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[Any]: if not prompts: return [] UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : str = [] if isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : int = [prompt.strip() for prompt in prompts.split('|' )] for prompt in prompts: if isinstance(_UpperCamelCase , (tuple, list) ): UpperCAmelCase_ : Optional[Any] = prompt[0] UpperCAmelCase_ : List[Any] = float(prompt[1] ) elif ":" in prompt: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = prompt.split(':' ) UpperCAmelCase_ : str = float(_UpperCamelCase ) else: UpperCAmelCase_ : int = prompt UpperCAmelCase_ : Union[str, Any] = 1.0 processed_prompts.append(_UpperCamelCase ) weights.append(_UpperCamelCase ) return { "prompts": processed_prompts, "weights": torch.tensor(_UpperCamelCase , device=self.device ), } def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=None , ) -> List[str]: if image_path: UpperCAmelCase_ : List[str] = self._get_latent(_UpperCamelCase ) else: UpperCAmelCase_ : Union[str, Any] = torch.randn(self.latent_dim , device=self.device ) if self.log: self._init_logging(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) assert pos_prompts, "You must provide at least one positive prompt." UpperCAmelCase_ : Optional[int] = self.process_prompts(_UpperCamelCase ) UpperCAmelCase_ : Any = self.process_prompts(_UpperCamelCase ) if save_final and save_path is None: UpperCAmelCase_ : Any = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) ) if not os.path.exists(_UpperCamelCase ): os.makedirs(_UpperCamelCase ) else: UpperCAmelCase_ : int = save_path + '_' + get_timestamp() os.makedirs(_UpperCamelCase ) UpperCAmelCase_ : Any = save_path UpperCAmelCase_ : Optional[int] = self.vqgan.decode(self.latent )[0] if show_intermediate: print('Original Image' ) show_pil(custom_to_pil(_UpperCamelCase ) ) UpperCAmelCase_ : Union[str, Any] = loop_post_process(_UpperCamelCase ) for iter, transformed_img in enumerate(self._optimize_CLIP(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ): if show_intermediate: show_pil(_UpperCamelCase ) if save_intermediate: transformed_img.save(os.path.join(self.save_path , f"iter_{iter:03d}.png" ) ) if self.log: wandb.log({'Image': wandb.Image(_UpperCamelCase )} ) if show_final: show_pil(_UpperCamelCase ) if save_final: transformed_img.save(os.path.join(self.save_path , f"iter_{iter:03d}_final.png" ) )
29
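# The VQGAN+CLIP class above accepts prompts as "a|b|c" strings, (text, weight)
# pairs, or "text:weight" strings. A standalone sketch of that parsing logic
# (function name is illustrative, not part of the project's API):
from typing import Union

def parse_prompts(prompts: Union[str, list]) -> dict:
    if isinstance(prompts, str):
        prompts = [p.strip() for p in prompts.split("|")]
    texts, weights = [], []
    for prompt in prompts:
        if isinstance(prompt, (tuple, list)):
            text, weight = prompt[0], float(prompt[1])
        elif ":" in prompt:
            text, weight = prompt.split(":")
            weight = float(weight)
        else:
            text, weight = prompt, 1.0
        texts.append(text)
        weights.append(weight)
    return {"prompts": texts, "weights": weights}

print(parse_prompts("a red nose:2|blue eyes"))
# {'prompts': ['a red nose', 'blue eyes'], 'weights': [2.0, 1.0]}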
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
29
1
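# The snippet above is the standard deprecation-shim pattern: keep the old
# class importable, emit a warning, and delegate to the renamed implementation.
# A generic sketch (class names here are invented for illustration):
import warnings

class NewImageProcessor:
    def __init__(self, scale: float = 1.0) -> None:
        self.scale = scale

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)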
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F'Dataset loaded in {time.time()-t_start:.2f}s')

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        'repo_name',
        'path',
        'copies',
        'size',
        'content',
        'license',
        'hash',
        'line_mean',
        'line_max',
        'alpha_frac',
        'autogenerated',
    ],
)
print(F'Dataset tokenized in {time.time()-t_start:.2f}s')

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'Data pushed to the hub in {time.time()-t_start:.2f}s')
29
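# The map above stores a chars-per-token ratio for each example. A tiny
# self-contained illustration of that metric, using whitespace splitting as a
# stand-in for the real subword tokenizer:
def chars_per_token(text: str) -> float:
    tokens = text.split()  # placeholder for tokenizer(text)["input_ids"]
    return len(text) / len(tokens)

print(chars_per_token("def add(a, b): return a + b"))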
def is_palindrome(head):
    if not head:
        return True
    # split the list into two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
29
1
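# De-obfuscated reference for the first approach above (find the middle with
# slow/fast pointers, reverse the second half in place, then compare), with a
# minimal node class added for testing:
class ListNode:
    def __init__(self, val, nxt=None):
        self.val, self.next = val, nxt

def is_palindrome_ref(head):
    if not head or not head.next:
        return True
    # find the middle
    slow = fast = head
    while fast.next and fast.next.next:
        slow, fast = slow.next, fast.next.next
    # reverse the second half
    prev, node = None, slow.next
    while node:
        node.next, prev, node = prev, node, node.next
    # compare the two halves
    first, second = head, prev
    while second:
        if first.val != second.val:
            return False
        first, second = first.next, second.next
    return True

assert is_palindrome_ref(ListNode(1, ListNode(2, ListNode(2, ListNode(1)))))
assert not is_palindrome_ref(ListNode(1, ListNode(2, ListNode(3))))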
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
29
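# Quick check of the greedy routine above: with values [60, 100, 120], weights
# [10, 20, 30] and capacity 50, the optimum takes items 0 and 1 whole plus two
# thirds of item 2 for a total of 240:
max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
print(max_value)  # 240.0
print(fractions)  # [1, 1, 0.6666666666666666]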
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_vit_msn'] = [
        'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTMSNModel',
        'ViTMSNForImageClassification',
        'ViTMSNPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
29
1
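# The _import_structure machinery above defers importing heavy submodules until
# an attribute is first requested. The same effect can be sketched with PEP 562
# module-level __getattr__ (an illustrative package __init__.py, not
# transformers' internal _LazyModule):
import importlib

_LAZY = {"ViTMSNModel": ".modeling_vit_msn"}

def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")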
from __future__ import annotations


def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input('-> ').strip().lower())
    print('Encoded: ', encoded)
    print('Decoded:', decode(encoded))


if __name__ == "__main__":
    main()
29
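# Round-trip check for the A1Z26-style cipher above ("a" -> 1, ..., "z" -> 26):
assert encode("hello") == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == "hello"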
__UpperCAmelCase = { 'Pillow': 'Pillow<10.0.0', 'accelerate': 'accelerate>=0.20.3', 'av': 'av==9.2.0', 'beautifulsoup4': 'beautifulsoup4', 'black': 'black~=23.1', 'codecarbon': 'codecarbon==1.2.0', 'cookiecutter': 'cookiecutter==1.7.3', 'dataclasses': 'dataclasses', 'datasets': 'datasets!=2.5.0', 'decord': 'decord==0.6.0', 'deepspeed': 'deepspeed>=0.9.3', 'diffusers': 'diffusers', 'dill': 'dill<0.3.5', 'evaluate': 'evaluate>=0.2.0', 'fairscale': 'fairscale>0.3', 'faiss-cpu': 'faiss-cpu', 'fastapi': 'fastapi', 'filelock': 'filelock', 'flax': 'flax>=0.4.1,<=0.7.0', 'ftfy': 'ftfy', 'fugashi': 'fugashi>=1.0', 'GitPython': 'GitPython<3.1.19', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0', 'importlib_metadata': 'importlib_metadata', 'ipadic': 'ipadic>=1.0.0,<2.0', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13', 'jaxlib': 'jaxlib>=0.1.65,<=0.4.13', 'jieba': 'jieba', 'kenlm': 'kenlm', 'keras-nlp': 'keras-nlp>=0.3.1', 'librosa': 'librosa', 'nltk': 'nltk', 'natten': 'natten>=0.14.6', 'numpy': 'numpy>=1.17', 'onnxconverter-common': 'onnxconverter-common', 'onnxruntime-tools': 'onnxruntime-tools>=1.4.2', 'onnxruntime': 'onnxruntime>=1.4.0', 'opencv-python': 'opencv-python', 'optuna': 'optuna', 'optax': 'optax>=0.0.8,<=0.1.4', 'packaging': 'packaging>=20.0', 'parameterized': 'parameterized', 'phonemizer': 'phonemizer', 'protobuf': 'protobuf', 'psutil': 'psutil', 'pyyaml': 'pyyaml>=5.1', 'pydantic': 'pydantic<2', 'pytest': 'pytest>=7.2.0', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'python': 'python>=3.8.0', 'ray[tune]': 'ray[tune]', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'rhoknp': 'rhoknp>=1.1.0,<1.3.1', 'rjieba': 'rjieba', 'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1', 'ruff': 'ruff>=0.0.241,<=0.0.259', 'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0', 'sacremoses': 'sacremoses', 'safetensors': 'safetensors>=0.3.1', 'sagemaker': 'sagemaker>=2.31.0', 'scikit-learn': 'scikit-learn', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'sigopt': 'sigopt', 'starlette': 'starlette', 'sudachipy': 'sudachipy>=0.6.6', 'sudachidict_core': 'sudachidict_core>=20220729', 'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14', 'tensorflow': 'tensorflow>=2.6,<2.14', 'tensorflow-text': 'tensorflow-text<2.14', 'tf2onnx': 'tf2onnx', 'timeout-decorator': 'timeout-decorator', 'timm': 'timm', 'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14', 'torch': 'torch>=1.9,!=1.12.0', 'torchaudio': 'torchaudio', 'torchvision': 'torchvision', 'pyctcdecode': 'pyctcdecode>=0.4.0', 'tqdm': 'tqdm>=4.27', 'unidic': 'unidic>=1.0.2', 'unidic_lite': 'unidic_lite>=1.0.7', 'urllib3': 'urllib3<2.0.0', 'uvicorn': 'uvicorn', }
29
1
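# The table above maps package names to pinned requirement specifiers. A sketch
# of validating such pins against the installed environment, assuming the
# third-party `packaging` library is available:
from importlib.metadata import PackageNotFoundError, version

from packaging.requirements import Requirement

def pin_is_satisfied(pin: str) -> bool:
    req = Requirement(pin)
    try:
        installed = version(req.name)
    except PackageNotFoundError:
        return False
    return req.specifier.contains(installed, prereleases=True)

print(pin_is_satisfied("numpy>=1.17"))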
import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase=1_3 , _UpperCamelCase=7 , _UpperCamelCase=6 , _UpperCamelCase=1_7 , _UpperCamelCase=2_3 , _UpperCamelCase=1_1 , _UpperCamelCase=True , ) -> int: UpperCAmelCase_ : Dict = parent UpperCAmelCase_ : str = batch_size UpperCAmelCase_ : List[Any] = seq_length UpperCAmelCase_ : Any = act_dim UpperCAmelCase_ : Dict = state_dim UpperCAmelCase_ : List[str] = hidden_size UpperCAmelCase_ : List[Any] = max_length UpperCAmelCase_ : Optional[Any] = is_training def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) UpperCAmelCase_ : Optional[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) UpperCAmelCase_ : Tuple = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase_ : Optional[Any] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase_ : str = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 ) UpperCAmelCase_ : Optional[Any] = random_attention_mask((self.batch_size, self.seq_length) ) UpperCAmelCase_ : Dict = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def __UpperCAmelCase ( self ) -> Optional[Any]: return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> List[str]: UpperCAmelCase_ : List[str] = DecisionTransformerModel(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() UpperCAmelCase_ : Dict = model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : List[str] = config_and_inputs UpperCAmelCase_ : Any = { 'states': states, 'actions': actions, 'rewards': rewards, 'returns_to_go': returns_to_go, 'timesteps': timesteps, 'attention_mask': attention_mask, } return config, inputs_dict @require_torch class 
lowerCamelCase (_snake_case , _snake_case , _snake_case , unittest.TestCase ): '''simple docstring''' _snake_case : int = (DecisionTransformerModel,) if is_torch_available() else () _snake_case : Tuple = () _snake_case : Any = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids _snake_case : List[Any] = False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features _snake_case : Union[str, Any] = False _snake_case : Optional[int] = False _snake_case : List[str] = False _snake_case : Optional[int] = False _snake_case : List[str] = False _snake_case : int = False _snake_case : int = False _snake_case : List[str] = False _snake_case : List[Any] = False def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : str = DecisionTransformerModelTester(self ) UpperCAmelCase_ : str = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 ) def __UpperCAmelCase ( self ) -> List[str]: self.config_tester.run_common_tests() def __UpperCAmelCase ( self ) -> Any: UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) @slow def __UpperCAmelCase ( self ) -> List[Any]: for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : int = DecisionTransformerModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : List[Any] = model_class(_UpperCamelCase ) UpperCAmelCase_ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Union[str, Any] = [*signature.parameters.keys()] UpperCAmelCase_ : List[str] = [ 'states', 'actions', 'rewards', 'returns_to_go', 'timesteps', 'attention_mask', ] self.assertListEqual(arg_names[: len(_UpperCamelCase )] , _UpperCamelCase ) @require_torch class lowerCamelCase (unittest.TestCase ): '''simple docstring''' @slow def __UpperCAmelCase ( self ) -> int: UpperCAmelCase_ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform UpperCAmelCase_ : Optional[Any] = 1_0 # defined by the RL environment, may be normalized UpperCAmelCase_ : Tuple = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' ) UpperCAmelCase_ : int = model.to(_UpperCamelCase ) UpperCAmelCase_ : int = model.config torch.manual_seed(0 ) UpperCAmelCase_ : str = torch.randn(1 , 1 , config.state_dim ).to(device=_UpperCamelCase , dtype=torch.floataa ) # env.reset() UpperCAmelCase_ : Tuple = torch.tensor( [[0.24_27_93, -0.28_69_30_74, 0.8_74_26_13], [0.67_81_52_74, -0.08_10_10_85, -0.12_95_21_47]] , device=_UpperCamelCase ) UpperCAmelCase_ : Tuple = torch.tensor(_UpperCamelCase , device=_UpperCamelCase , dtype=torch.floataa ).reshape(1 , 1 , 1 ) UpperCAmelCase_ : Optional[int] = state UpperCAmelCase_ : str = torch.zeros(1 , 0 , config.act_dim , device=_UpperCamelCase , dtype=torch.floataa ) UpperCAmelCase_ : List[Any] = torch.zeros(1 , 0 , device=_UpperCamelCase , dtype=torch.floataa ) UpperCAmelCase_ : Optional[int] = torch.tensor(0 , device=_UpperCamelCase , dtype=torch.long ).reshape(1 , 1 ) for step in range(_UpperCamelCase ): UpperCAmelCase_ : 
Union[str, Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=_UpperCamelCase )] , dim=1 ) UpperCAmelCase_ : Tuple = torch.cat([rewards, torch.zeros(1 , 1 , device=_UpperCamelCase )] , dim=1 ) UpperCAmelCase_ : List[str] = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = model( states=_UpperCamelCase , actions=_UpperCamelCase , rewards=_UpperCamelCase , returns_to_go=_UpperCamelCase , timesteps=_UpperCamelCase , attention_mask=_UpperCamelCase , return_dict=_UpperCamelCase , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=_UpperCamelCase , dtype=torch.floataa ), 1.0, False, {}, ) UpperCAmelCase_ : List[str] = action_pred[0, -1] UpperCAmelCase_ : Tuple = torch.cat([states, state] , dim=1 ) UpperCAmelCase_ : Tuple = returns_to_go[0, -1] - reward UpperCAmelCase_ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) UpperCAmelCase_ : Optional[Any] = torch.cat( [timesteps, torch.ones((1, 1) , device=_UpperCamelCase , dtype=torch.long ) * (step + 1)] , dim=1 )
29
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : "DiagonalGaussianDistribution" class lowerCamelCase (_snake_case , _snake_case ): '''simple docstring''' _snake_case : Optional[int] = True @register_to_config def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]: super().__init__() # pass init params to Encoder UpperCAmelCase_ : List[str] = Encoder( in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , ) # pass init params to Decoder UpperCAmelCase_ : Dict = Decoder( in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , ) UpperCAmelCase_ : Any = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) UpperCAmelCase_ : List[Any] = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 ) UpperCAmelCase_ : Any = False UpperCAmelCase_ : int = False # only relevant if vae tiling is enabled UpperCAmelCase_ : Optional[int] = self.config.sample_size UpperCAmelCase_ : int = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) UpperCAmelCase_ : Optional[Any] = 0.25 def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]: if isinstance(_UpperCamelCase , (Encoder, Decoder) ): UpperCAmelCase_ : Union[str, Any] = value def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int: UpperCAmelCase_ : Tuple = use_tiling def __UpperCAmelCase ( self ) -> Dict: self.enable_tiling(_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ : str = True def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : Optional[int] = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]: UpperCAmelCase_ : Optional[int] = {} def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if hasattr(_UpperCamelCase , 'set_processor' ): UpperCAmelCase_ : Optional[int] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) return processors def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = 
len(self.attn_processors.keys() ) if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if hasattr(_UpperCamelCase , 'set_processor' ): if not isinstance(_UpperCamelCase , _UpperCamelCase ): module.set_processor(_UpperCamelCase ) else: module.set_processor(processor.pop(f"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> Union[str, Any]: self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput: if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase ) if self.use_slicing and x.shape[0] > 1: UpperCAmelCase_ : Union[str, Any] = [self.encoder(_UpperCamelCase ) for x_slice in x.split(1 )] UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase ) else: UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase ) UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase ) UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase ) UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase ) @apply_forward_hook def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: if self.use_slicing and z.shape[0] > 1: UpperCAmelCase_ : List[str] = [self._decode(_UpperCamelCase ).sample for z_slice in z.split(1 )] UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase ) else: UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any: UpperCAmelCase_ : Tuple = min(a.shape[2] , b.shape[2] , _UpperCamelCase ) for y in range(_UpperCamelCase ): UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict: UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , _UpperCamelCase ) for x in range(_UpperCamelCase ): UpperCAmelCase_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput: UpperCAmelCase_ : Any = 
int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor ) UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. UpperCAmelCase_ : List[str] = [] for i in range(0 , x.shape[2] , _UpperCamelCase ): UpperCAmelCase_ : Any = [] for j in range(0 , x.shape[3] , _UpperCamelCase ): UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase ) UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase ) row.append(_UpperCamelCase ) rows.append(_UpperCamelCase ) UpperCAmelCase_ : str = [] for i, row in enumerate(_UpperCamelCase ): UpperCAmelCase_ : List[Any] = [] for j, tile in enumerate(_UpperCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase ) if j > 0: UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) ) UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 ) UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor ) UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. 
UpperCAmelCase_ : Union[str, Any] = [] for i in range(0 , z.shape[2] , _UpperCamelCase ): UpperCAmelCase_ : List[str] = [] for j in range(0 , z.shape[3] , _UpperCamelCase ): UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase ) UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase ) row.append(_UpperCamelCase ) rows.append(_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = [] for i, row in enumerate(_UpperCamelCase ): UpperCAmelCase_ : List[Any] = [] for j, tile in enumerate(_UpperCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase ) if j > 0: UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) ) UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]: UpperCAmelCase_ : Optional[Any] = sample UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist if sample_posterior: UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase ) else: UpperCAmelCase_ : int = posterior.mode() UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase )
29
1
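# The tiled encode/decode above hides seams by linearly cross-fading each tile
# into its neighbour over `blend_extent` rows (blend_v) or columns (blend_h).
# A minimal numpy sketch of the vertical blend used above:
import numpy as np

def blend_v(a: np.ndarray, b: np.ndarray, blend_extent: int) -> np.ndarray:
    # fade from a's bottom rows into b's top rows
    blend_extent = min(a.shape[-2], b.shape[-2], blend_extent)
    out = b.copy()
    for y in range(blend_extent):
        w = y / blend_extent
        out[..., y, :] = a[..., -blend_extent + y, :] * (1 - w) + b[..., y, :] * w
    return out

a = np.zeros((1, 1, 4, 4))
b = np.ones((1, 1, 4, 4))
print(blend_v(a, b, 2)[0, 0, :, 0])  # [0.  0.5 1.  1. ]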
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
29
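# Worked example for the explicit Euler routine above: integrating y' = y from
# x = 0 with y(0) = 1 approximates e^x, so y(1) with step 0.001 lands close to
# e = 2.71828...:
y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
print(y[-1])  # ~2.7169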
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1'))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
29
1
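# Example for the bitwise-AND helper above: 25 (0b11001) and 32 (0b100000)
# share no set bits, while 25 and 9 (0b01001) share 0b01001:
print(binary_and(25, 32))  # 0b000000
print(binary_and(25, 9))   # 0b01001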
from __future__ import annotations

graph = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, 'G')
    g.breath_first_search()
    print(g.shortest_path('D'))
    print(g.shortest_path('G'))
    print(g.shortest_path('Foo'))
29
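# The queue above uses list.pop(0), which is O(n) per dequeue; the standard
# idiom is collections.deque with O(1) popleft. A drop-in sketch of the same
# parent-tree construction (reuses the `graph` dict defined above):
from collections import deque

def bfs_parents(graph: dict, source: str) -> dict:
    parent = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()
        for adjacent in graph[vertex]:
            if adjacent not in parent:
                parent[adjacent] = vertex
                queue.append(adjacent)
    return parent

print(bfs_parents(graph, "G"))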
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'vocab.txt'} __UpperCAmelCase = { 'vocab_file': { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt', } } __UpperCAmelCase = { 'YituTech/conv-bert-base': 512, 'YituTech/conv-bert-medium-small': 512, 'YituTech/conv-bert-small': 512, } __UpperCAmelCase = { 'YituTech/conv-bert-base': {'do_lower_case': True}, 'YituTech/conv-bert-medium-small': {'do_lower_case': True}, 'YituTech/conv-bert-small': {'do_lower_case': True}, } class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Optional[int] = VOCAB_FILES_NAMES _snake_case : int = PRETRAINED_VOCAB_FILES_MAP _snake_case : Dict = PRETRAINED_INIT_CONFIGURATION _snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : Any = ConvBertTokenizer def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict: super().__init__( _UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , ) UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _UpperCamelCase ) != do_lower_case or normalizer_state.get('strip_accents' , _UpperCamelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _UpperCamelCase ) != tokenize_chinese_chars ): UpperCAmelCase_ : Any = getattr(_UpperCamelCase , normalizer_state.pop('type' ) ) UpperCAmelCase_ : str = do_lower_case UpperCAmelCase_ : List[Any] = strip_accents UpperCAmelCase_ : str = tokenize_chinese_chars UpperCAmelCase_ : Tuple = normalizer_class(**_UpperCamelCase ) UpperCAmelCase_ : Any = do_lower_case def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[str]: UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]: UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id] UpperCAmelCase_ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]: UpperCAmelCase_ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase ) return tuple(_UpperCamelCase )
29
1
from ..utils import DummyObject, requires_backends class lowerCamelCase (metaclass=_snake_case ): '''simple docstring''' _snake_case : List[Any] = ['''speech'''] def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]: requires_backends(self , ['speech'] ) class lowerCamelCase (metaclass=_snake_case ): '''simple docstring''' _snake_case : int = ['''speech'''] def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Any: requires_backends(self , ['speech'] )
29
from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'snap-research/efficientformer-l1-300': ( 'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json' ), } class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Optional[int] = '''efficientformer''' def __init__( self , _UpperCamelCase = [3, 2, 6, 4] , _UpperCamelCase = [4_8, 9_6, 2_2_4, 4_4_8] , _UpperCamelCase = [True, True, True, True] , _UpperCamelCase = 4_4_8 , _UpperCamelCase = 3_2 , _UpperCamelCase = 4 , _UpperCamelCase = 7 , _UpperCamelCase = 5 , _UpperCamelCase = 8 , _UpperCamelCase = 4 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1_6 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 2 , _UpperCamelCase = 1 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1 , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1E-5 , _UpperCamelCase = "gelu" , _UpperCamelCase = 0.02 , _UpperCamelCase = 1E-12 , _UpperCamelCase = 2_2_4 , _UpperCamelCase = 1E-05 , **_UpperCamelCase , ) -> None: super().__init__(**_UpperCamelCase ) UpperCAmelCase_ : int = hidden_act UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase_ : Tuple = hidden_sizes UpperCAmelCase_ : Union[str, Any] = num_hidden_layers UpperCAmelCase_ : List[str] = num_attention_heads UpperCAmelCase_ : List[Any] = initializer_range UpperCAmelCase_ : int = layer_norm_eps UpperCAmelCase_ : List[str] = patch_size UpperCAmelCase_ : Union[str, Any] = num_channels UpperCAmelCase_ : Optional[Any] = depths UpperCAmelCase_ : List[Any] = mlp_expansion_ratio UpperCAmelCase_ : List[str] = downsamples UpperCAmelCase_ : List[Any] = dim UpperCAmelCase_ : Tuple = key_dim UpperCAmelCase_ : Optional[int] = attention_ratio UpperCAmelCase_ : str = resolution UpperCAmelCase_ : Dict = pool_size UpperCAmelCase_ : Union[str, Any] = downsample_patch_size UpperCAmelCase_ : List[str] = downsample_stride UpperCAmelCase_ : List[str] = downsample_pad UpperCAmelCase_ : Any = drop_path_rate UpperCAmelCase_ : Dict = num_metaad_blocks UpperCAmelCase_ : Dict = distillation UpperCAmelCase_ : int = use_layer_scale UpperCAmelCase_ : Any = layer_scale_init_value UpperCAmelCase_ : Any = image_size UpperCAmelCase_ : Dict = batch_norm_eps
29
1
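A hedged usage sketch for the configuration above, assuming the matching `EfficientFormerModel` class from `transformers` is available:

from transformers import EfficientFormerConfig, EfficientFormerModel

# Build a randomly initialised EfficientFormer from an explicit configuration.
config = EfficientFormerConfig(num_hidden_layers=5, hidden_sizes=[48, 96, 224, 448])
model = EfficientFormerModel(config)
print(model.config.num_meta3d_blocks)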
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Dict = (DDIMParallelScheduler,) _snake_case : List[Any] = (('''eta''', 0.0), ('''num_inference_steps''', 5_0)) def __UpperCAmelCase ( self , **_UpperCamelCase ) -> Union[str, Any]: UpperCAmelCase_ : Dict = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', 'clip_sample': True, } config.update(**_UpperCamelCase ) return config def __UpperCAmelCase ( self , **_UpperCamelCase ) -> int: UpperCAmelCase_ : int = self.scheduler_classes[0] UpperCAmelCase_ : Dict = self.get_scheduler_config(**_UpperCamelCase ) UpperCAmelCase_ : Dict = scheduler_class(**_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = 1_0, 0.0 UpperCAmelCase_ : List[str] = self.dummy_model() UpperCAmelCase_ : Optional[Any] = self.dummy_sample_deter scheduler.set_timesteps(_UpperCamelCase ) for t in scheduler.timesteps: UpperCAmelCase_ : str = model(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).prev_sample return sample def __UpperCAmelCase ( self ) -> List[str]: for timesteps in [1_0_0, 5_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> str: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_UpperCamelCase ) UpperCAmelCase_ : Any = self.scheduler_classes[0] UpperCAmelCase_ : Dict = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase_ : str = scheduler_class(**_UpperCamelCase ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) ) def __UpperCAmelCase ( self ) -> Optional[Any]: for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> int: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> List[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> int: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Any: for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[Any]: for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> str: self.check_over_configs(thresholding=_UpperCamelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=_UpperCamelCase , prediction_type=_UpperCamelCase , sample_max_value=_UpperCamelCase , ) def __UpperCAmelCase ( self ) -> int: for t in [1, 1_0, 4_9]: self.check_over_forward(time_step=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Tuple: for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ): self.check_over_forward(time_step=_UpperCamelCase , num_inference_steps=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[Any]: for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=_UpperCamelCase , 
eta=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.scheduler_classes[0] UpperCAmelCase_ : List[str] = self.get_scheduler_config() UpperCAmelCase_ : Any = scheduler_class(**_UpperCamelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.1_47_71 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.3_24_60 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.0_09_79 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1E-5 def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : List[Any] = self.scheduler_classes[0] UpperCAmelCase_ : Any = self.get_scheduler_config() UpperCAmelCase_ : Tuple = scheduler_class(**_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = 1_0, 0.0 scheduler.set_timesteps(_UpperCamelCase ) UpperCAmelCase_ : Optional[int] = self.dummy_model() UpperCAmelCase_ : str = self.dummy_sample_deter UpperCAmelCase_ : str = self.dummy_sample_deter + 0.1 UpperCAmelCase_ : List[Any] = self.dummy_sample_deter - 0.1 UpperCAmelCase_ : List[str] = samplea.shape[0] UpperCAmelCase_ : Optional[int] = torch.stack([samplea, samplea, samplea] , dim=0 ) UpperCAmelCase_ : str = torch.arange(_UpperCamelCase )[0:3, None].repeat(1 , _UpperCamelCase ) UpperCAmelCase_ : List[str] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) UpperCAmelCase_ : Dict = scheduler.batch_step_no_noise(_UpperCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase ) ) UpperCAmelCase_ : List[Any] = torch.mean(torch.abs(_UpperCamelCase ) ) assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2 assert abs(result_mean.item() - 0.49_82 ) < 1E-3 def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : Optional[int] = self.full_loop() UpperCAmelCase_ : List[str] = torch.sum(torch.abs(_UpperCamelCase ) ) UpperCAmelCase_ : Any = torch.mean(torch.abs(_UpperCamelCase ) ) assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2 assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3 def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = self.full_loop(prediction_type='v_prediction' ) UpperCAmelCase_ : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase ) ) UpperCAmelCase_ : List[str] = torch.mean(torch.abs(_UpperCamelCase ) ) assert abs(result_sum.item() - 52.53_02 ) < 1E-2 assert abs(result_mean.item() - 0.06_84 ) < 1E-3 def __UpperCAmelCase ( self ) -> Tuple: # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ : List[str] = self.full_loop(set_alpha_to_one=_UpperCamelCase , beta_start=0.01 ) UpperCAmelCase_ : int = torch.sum(torch.abs(_UpperCamelCase ) ) UpperCAmelCase_ : Tuple = torch.mean(torch.abs(_UpperCamelCase ) ) assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2 assert abs(result_mean.item() - 0.19_51 ) < 1E-3 def __UpperCAmelCase ( self ) -> Union[str, Any]: # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ : Dict = self.full_loop(set_alpha_to_one=_UpperCamelCase , beta_start=0.01 ) UpperCAmelCase_ : Optional[Any] = torch.sum(torch.abs(_UpperCamelCase ) ) UpperCAmelCase_ : Optional[Any] = torch.mean(torch.abs(_UpperCamelCase ) ) assert abs(result_sum.item() - 1_49.07_84 ) < 1E-2 assert abs(result_mean.item() - 0.19_41 
) < 1E-3
29
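A minimal denoising-loop sketch for the scheduler exercised above; the random tensor stands in for a real UNet prediction, so only the `step` API is meaningful here:

import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_schedule='linear')
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(model_output, t, sample, eta=0.0).prev_sample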
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n' @dataclass class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Union[PIL.Image.Image, np.ndarray] class lowerCamelCase (_snake_case ): '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any: super().__init__() self.register_modules( prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]: if latents is None: UpperCAmelCase_ : str = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase ) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" ) UpperCAmelCase_ : Tuple = latents.to(_UpperCamelCase ) UpperCAmelCase_ : Tuple = latents * scheduler.init_noise_sigma return latents def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) UpperCAmelCase_ : int = torch.device(f"cuda:{gpu_id}" ) UpperCAmelCase_ : int = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_UpperCamelCase , _UpperCamelCase ) @property def __UpperCAmelCase ( self ) -> int: if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_UpperCamelCase , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> str: if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ): UpperCAmelCase_ : int = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else 
torch.stack(_UpperCamelCase , axis=0 ) if not isinstance(_UpperCamelCase , torch.Tensor ): UpperCAmelCase_ : Optional[int] = self.image_processor(_UpperCamelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 ) UpperCAmelCase_ : Tuple = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = self.image_encoder(_UpperCamelCase )['last_hidden_state'] UpperCAmelCase_ : Union[str, Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 UpperCAmelCase_ : List[str] = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase_ : Dict = torch.zeros_like(_UpperCamelCase ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase_ : Optional[int] = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_UpperCamelCase ) def __call__( self , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = 2_5 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 4.0 , _UpperCamelCase = 6_4 , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> Union[str, Any]: if isinstance(_UpperCamelCase , PIL.Image.Image ): UpperCAmelCase_ : Tuple = 1 elif isinstance(_UpperCamelCase , torch.Tensor ): UpperCAmelCase_ : str = image.shape[0] elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): UpperCAmelCase_ : Optional[int] = len(_UpperCamelCase ) else: raise ValueError( f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}" ) UpperCAmelCase_ : Tuple = self._execution_device UpperCAmelCase_ : str = batch_size * num_images_per_prompt UpperCAmelCase_ : str = guidance_scale > 1.0 UpperCAmelCase_ : str = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # prior self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase ) UpperCAmelCase_ : int = self.scheduler.timesteps UpperCAmelCase_ : int = self.prior.config.num_embeddings UpperCAmelCase_ : Any = self.prior.config.embedding_dim UpperCAmelCase_ : List[str] = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim UpperCAmelCase_ : List[Any] = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase ) for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase_ : Optional[Any] = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : int = self.prior( _UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding # remove the variance UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 ) UpperCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred 
- noise_pred_uncond) UpperCAmelCase_ : List[str] = self.scheduler.step( _UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=_UpperCamelCase ) UpperCAmelCase_ : List[Any] = [] for i, latent in enumerate(_UpperCamelCase ): UpperCAmelCase_ : List[str] = self.renderer.decode( latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , ) images.append(_UpperCamelCase ) UpperCAmelCase_ : Optional[int] = torch.stack(_UpperCamelCase ) if output_type not in ["np", "pil"]: raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" ) UpperCAmelCase_ : Dict = images.cpu().numpy() if output_type == "pil": UpperCAmelCase_ : List[str] = [self.numpy_to_pil(image ) for image in images] # Offload last model to CPU if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_UpperCamelCase )
29
1
import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict _TestCommandArgs = namedtuple( '_TestCommandArgs', [ 'dataset', 'name', 'cache_dir', 'data_dir', 'all_configs', 'save_infos', 'ignore_verifications', 'force_redownload', 'clear_cache', ], defaults=[None, None, None, False, False, False, False, False], ) def is_apercent_close(source, target): """Return True when `source` is within one percent of `target`.""" return (abs(source - target) / target) < 0.01 @pytest.mark.integration def test_test_command(dataset_dir): # `dataset_dir` is assumed to be a fixture pointing at a dataset loading script args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True) # truthy flags assumed: the saved infos are checked below test_command = TestCommand(*args) test_command.run() readme_path = os.path.join(dataset_dir, 'README.md') assert os.path.exists(readme_path) dataset_infos = DatasetInfosDict.from_directory(dataset_dir) expected_dataset_infos = DatasetInfosDict( { 'default': DatasetInfo( features=Features( { 'tokens': Sequence(Value('string')), 'ner_tags': Sequence( ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC']) ), 'langs': Sequence(Value('string')), 'spans': Sequence(Value('string')), } ), splits=[ { 'name': 'train', 'num_bytes': 2_351_563, 'num_examples': 10_000, }, { 'name': 'validation', 'num_bytes': 238_418, 'num_examples': 1_000, }, ], download_size=3_940_680, dataset_size=2_589_981, ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: result, expected = getattr(dataset_infos['default'], key), getattr(expected_dataset_infos['default'], key) if key == 'num_bytes': assert is_apercent_close(result, expected) elif key == 'splits': assert list(result) == list(expected) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes) else: # was a bare `result == expected`, which computed the comparison and discarded it assert result == expected
29
import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ): '''simple docstring''' _snake_case : Union[str, Any] = IFImgaImgSuperResolutionPipeline _snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''} _snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} ) _snake_case : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''} def __UpperCAmelCase ( self ) -> Optional[Any]: return self._get_superresolution_dummy_components() def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Any: if str(_UpperCamelCase ).startswith('mps' ): UpperCAmelCase_ : List[Any] = torch.manual_seed(_UpperCamelCase ) else: UpperCAmelCase_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase ) UpperCAmelCase_ : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase ) UpperCAmelCase_ : Tuple = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'original_image': original_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __UpperCAmelCase ( self ) -> Any: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __UpperCAmelCase ( self ) -> Dict: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __UpperCAmelCase ( self ) -> str: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __UpperCAmelCase ( self ) -> List[Any]: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __UpperCAmelCase ( self ) -> Union[str, Any]: self._test_save_load_local() def __UpperCAmelCase ( self ) -> Dict: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
29
1
import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase=2 , _UpperCamelCase=3_2 , _UpperCamelCase=1_6 , _UpperCamelCase=3 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=3_2 , _UpperCamelCase=4 , _UpperCamelCase=[0, 1, 2, 3] , _UpperCamelCase=4 , _UpperCamelCase=3_7 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=[1, 3_8_4, 2_4, 2_4] , _UpperCamelCase=True , _UpperCamelCase=None , ) -> List[str]: UpperCAmelCase_ : Any = parent UpperCAmelCase_ : Dict = batch_size UpperCAmelCase_ : Optional[Any] = image_size UpperCAmelCase_ : Union[str, Any] = patch_size UpperCAmelCase_ : List[Any] = num_channels UpperCAmelCase_ : int = is_training UpperCAmelCase_ : List[str] = use_labels UpperCAmelCase_ : Optional[int] = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : Dict = backbone_out_indices UpperCAmelCase_ : Tuple = num_attention_heads UpperCAmelCase_ : List[str] = intermediate_size UpperCAmelCase_ : Dict = hidden_act UpperCAmelCase_ : List[str] = hidden_dropout_prob UpperCAmelCase_ : int = attention_probs_dropout_prob UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Dict = num_labels UpperCAmelCase_ : List[Any] = backbone_featmap_shape UpperCAmelCase_ : Tuple = scope UpperCAmelCase_ : int = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ : List[Any] = (image_size // patch_size) ** 2 UpperCAmelCase_ : Tuple = num_patches + 1 def __UpperCAmelCase ( self ) -> int: UpperCAmelCase_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Optional[Any] = None if self.use_labels: UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCAmelCase_ : Optional[Any] = self.get_config() return config, pixel_values, labels def __UpperCAmelCase ( self ) -> Any: UpperCAmelCase_ : str = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, 'hidden_sizes': [9_6, 1_9_2, 3_8_4, 7_6_8], 'num_groups': 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob 
, is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple: UpperCAmelCase_ : Tuple = DPTModel(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() UpperCAmelCase_ : Optional[int] = model(_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple: UpperCAmelCase_ : Optional[Any] = self.num_labels UpperCAmelCase_ : Any = DPTForDepthEstimation(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() UpperCAmelCase_ : str = model(_UpperCamelCase ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = self.num_labels UpperCAmelCase_ : List[Any] = DPTForSemanticSegmentation(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() UpperCAmelCase_ : int = model(_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs UpperCAmelCase_ : Tuple = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ): '''simple docstring''' _snake_case : int = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () _snake_case : List[str] = ( { '''depth-estimation''': DPTForDepthEstimation, '''feature-extraction''': DPTModel, '''image-segmentation''': DPTForSemanticSegmentation, } if is_torch_available() else {} ) _snake_case : Optional[int] = False _snake_case : List[str] = False _snake_case : Union[str, Any] = False def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = DPTModelTester(self ) UpperCAmelCase_ : Any = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=3_7 ) def __UpperCAmelCase ( self ) -> Tuple: self.config_tester.run_common_tests() @unittest.skip(reason='DPT does not use inputs_embeds' ) def __UpperCAmelCase ( self ) -> str: pass def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : List[str] = model_class(_UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase_ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) ) def __UpperCAmelCase ( self ) -> Dict: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Dict = model_class(_UpperCamelCase ) UpperCAmelCase_ : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Dict = [*signature.parameters.keys()] 
UpperCAmelCase_ : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def __UpperCAmelCase ( self ) -> int: UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*_UpperCamelCase ) def __UpperCAmelCase ( self ) -> List[str]: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[Any]: for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : str = True if model_class in get_values(_UpperCamelCase ): continue UpperCAmelCase_ : List[Any] = model_class(_UpperCamelCase ) model.to(_UpperCamelCase ) model.train() UpperCAmelCase_ : str = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase ) UpperCAmelCase_ : int = model(**_UpperCamelCase ).loss loss.backward() def __UpperCAmelCase ( self ) -> str: for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[str] = False UpperCAmelCase_ : int = True if model_class in get_values(_UpperCamelCase ) or not model_class.supports_gradient_checkpointing: continue UpperCAmelCase_ : List[Any] = model_class(_UpperCamelCase ) model.to(_UpperCamelCase ) model.gradient_checkpointing_enable() model.train() UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase ) UpperCAmelCase_ : List[str] = model(**_UpperCamelCase ).loss loss.backward() def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : int = _config_zero_init(_UpperCamelCase ) for model_class in self.all_model_classes: UpperCAmelCase_ : Tuple = model_class(config=_UpperCamelCase ) # Skip the check for the backbone UpperCAmelCase_ : Optional[int] = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": UpperCAmelCase_ : Optional[int] = [f"{name}.{key}" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def __UpperCAmelCase ( self ) -> int: pass @slow def __UpperCAmelCase ( self ) -> Optional[int]: for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: UpperCAmelCase_ : Tuple = DPTModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) def __UpperCAmelCase ( self ) -> str: # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Union[str, Any] = 'add' with self.assertRaises(_UpperCamelCase ): UpperCAmelCase_ : Any = DPTForDepthEstimation(_UpperCamelCase ) def lowercase__ ( ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision @slow class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ : int = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' ) UpperCAmelCase_ : int = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(_UpperCamelCase ) UpperCAmelCase_ : str = prepare_img() UpperCAmelCase_ : List[Any] = image_processor(images=_UpperCamelCase , return_tensors='pt' ).to(_UpperCamelCase ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model(**_UpperCamelCase ) UpperCAmelCase_ : Dict = outputs.predicted_depth # verify the predicted depth UpperCAmelCase_ : Optional[int] = torch.Size((1, 3_8_4, 3_8_4) ) self.assertEqual(predicted_depth.shape , _UpperCamelCase ) UpperCAmelCase_ : Optional[int] = torch.tensor( [[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(_UpperCamelCase ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , _UpperCamelCase , atol=1E-4 ) )
29
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { 'configuration_time_series_transformer': [ 'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimeSeriesTransformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_time_series_transformer'] = [ 'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TimeSeriesTransformerForPrediction', 'TimeSeriesTransformerModel', 'TimeSeriesTransformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
29
1
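The `_LazyModule` wiring above keeps `import transformers` cheap: the heavy PyTorch module is only imported when one of the listed symbols is first accessed. A small sketch:

from transformers import TimeSeriesTransformerConfig  # triggers the lazy import

config = TimeSeriesTransformerConfig(prediction_length=24)
print(config.model_type)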
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _import_structure = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_mluke'] = ['MLukeTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
29
import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'tensor(bool)': np.bool_, 'tensor(int8)': np.inta, 'tensor(uint8)': np.uinta, 'tensor(int16)': np.intaa, 'tensor(uint16)': np.uintaa, 'tensor(int32)': np.intaa, 'tensor(uint32)': np.uintaa, 'tensor(int64)': np.intaa, 'tensor(uint64)': np.uintaa, 'tensor(float16)': np.floataa, 'tensor(float)': np.floataa, 'tensor(double)': np.floataa, } class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase=None , **_UpperCamelCase ) -> Dict: logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' ) UpperCAmelCase_ : Any = model UpperCAmelCase_ : int = kwargs.get('model_save_dir' , _UpperCamelCase ) UpperCAmelCase_ : List[Any] = kwargs.get('latest_model_name' , _UpperCamelCase ) def __call__( self , **_UpperCamelCase ) -> str: UpperCAmelCase_ : Optional[int] = {k: np.array(_UpperCamelCase ) for k, v in kwargs.items()} return self.model.run(_UpperCamelCase , _UpperCamelCase ) @staticmethod def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ) -> List[Any]: if provider is None: logger.info('No onnxruntime provider specified, using CPUExecutionProvider' ) UpperCAmelCase_ : List[str] = 'CPUExecutionProvider' return ort.InferenceSession(_UpperCamelCase , providers=[provider] , sess_options=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> Dict: UpperCAmelCase_ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(self.latest_model_name ) UpperCAmelCase_ : str = Path(_UpperCamelCase ).joinpath(_UpperCamelCase ) try: shutil.copyfile(_UpperCamelCase , _UpperCamelCase ) except shutil.SameFileError: pass # copy external weights (for models >2GB) UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(_UpperCamelCase ) if src_path.exists(): UpperCAmelCase_ : List[Any] = Path(_UpperCamelCase ).joinpath(_UpperCamelCase ) try: shutil.copyfile(_UpperCamelCase , _UpperCamelCase ) except shutil.SameFileError: pass def __UpperCAmelCase ( self , _UpperCamelCase , **_UpperCamelCase , ) -> List[str]: if os.path.isfile(_UpperCamelCase ): logger.error(f"Provided path ({save_directory}) should be a directory, not a file" ) return os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase ) # saving model weights/files self._save_pretrained(_UpperCamelCase , **_UpperCamelCase ) @classmethod def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> List[str]: UpperCAmelCase_ : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(_UpperCamelCase ): UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model( os.path.join(_UpperCamelCase , _UpperCamelCase ) , provider=_UpperCamelCase , sess_options=_UpperCamelCase ) UpperCAmelCase_ : Tuple = Path(_UpperCamelCase ) # load model from hub else: # download model UpperCAmelCase_ : List[str] = hf_hub_download( repo_id=_UpperCamelCase , 
filename=_UpperCamelCase , use_auth_token=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , ) UpperCAmelCase_ : Union[str, Any] = Path(_UpperCamelCase ).parent UpperCAmelCase_ : List[str] = Path(_UpperCamelCase ).name UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(_UpperCamelCase , provider=_UpperCamelCase , sess_options=_UpperCamelCase ) return cls(model=_UpperCamelCase , **_UpperCamelCase ) @classmethod def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> Optional[int]: UpperCAmelCase_ : List[str] = None if len(str(_UpperCamelCase ).split('@' ) ) == 2: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model_id.split('@' ) return cls._from_pretrained( model_id=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , use_auth_token=_UpperCamelCase , **_UpperCamelCase , )
29
1
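A hedged loading sketch for the ONNX wrapper above; './onnx_model_dir' is a hypothetical local export, and the input name depends on how the graph was exported:

import numpy as np
from diffusers import OnnxRuntimeModel

model = OnnxRuntimeModel.from_pretrained('./onnx_model_dir', provider='CPUExecutionProvider')
# __call__ converts keyword arguments to numpy arrays and runs the session.
outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))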
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class lowerCamelCase (_snake_case , unittest.TestCase ): '''simple docstring''' _snake_case : int = XLNetTokenizer _snake_case : int = XLNetTokenizerFast _snake_case : Union[str, Any] = True _snake_case : List[Any] = True def __UpperCAmelCase ( self ) -> List[Any]: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase_ : Optional[Any] = XLNetTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Dict = '<s>' UpperCAmelCase_ : str = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<unk>' ) self.assertEqual(vocab_keys[1] , '<s>' ) self.assertEqual(vocab_keys[-1] , '<eod>' ) self.assertEqual(len(_UpperCamelCase ) , 1_0_0_6 ) def __UpperCAmelCase ( self ) -> Dict: self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = XLNetTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase ) UpperCAmelCase_ : Tuple = tokenizer.tokenize('This is a test' ) self.assertListEqual(_UpperCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] ) UpperCAmelCase_ : int = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( _UpperCamelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) UpperCAmelCase_ : Optional[int] = tokenizer.convert_tokens_to_ids(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] ) UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(_UpperCamelCase ) self.assertListEqual( _UpperCamelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : List[Any] = XLNetTokenizer(_UpperCamelCase , do_lower_case=_UpperCamelCase ) UpperCAmelCase_ : List[str] = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( _UpperCamelCase , [ SPIECE_UNDERLINE + '', 'i', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 'se', '.', ] , ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['▁he', 'll', 'o'] ) def __UpperCAmelCase ( self ) -> Any: UpperCAmelCase_ : Dict = XLNetTokenizer(_UpperCamelCase , do_lower_case=_UpperCamelCase ) UpperCAmelCase_ : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( _UpperCamelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 'se', '.', ] , ) @slow def __UpperCAmelCase ( self ) -> Dict: UpperCAmelCase_ : str = XLNetTokenizer.from_pretrained('xlnet-base-cased' ) UpperCAmelCase_ : Union[str, Any] = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCamelCase ) UpperCAmelCase_ : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCamelCase ) UpperCAmelCase_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase ) UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def __UpperCAmelCase ( self ) -> str: # fmt: off UpperCAmelCase_ : List[Any] = {'input_ids': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCamelCase , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
29
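A quick illustration of the special-token layout the tests above assert: unlike BERT, XLNet appends `<sep>` (id 4) and `<cls>` (id 3) at the end of the sequence:

from transformers import XLNetTokenizer

tok = XLNetTokenizer.from_pretrained('xlnet-base-cased')
ids = tok.encode('sequence builders')
print(ids[-2:])  # [4, 3] -> sep_token_id, cls_token_id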
import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope='session' ) def lowercase__ ( ): '''simple docstring''' UpperCAmelCase_ : Tuple = 10 UpperCAmelCase_ : Tuple = datasets.Features( { 'tokens': datasets.Sequence(datasets.Value('string' ) ), 'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ), 'answers': datasets.Sequence( { 'text': datasets.Value('string' ), 'answer_start': datasets.Value('int32' ), } ), 'id': datasets.Value('int64' ), } ) UpperCAmelCase_ : Tuple = datasets.Dataset.from_dict( { 'tokens': [['foo'] * 5] * n, 'labels': [[1] * 5] * n, 'answers': [{'answer_start': [97], 'text': ['1976']}] * 10, 'id': list(range(__snake_case ) ), } , features=__snake_case , ) return dataset @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[str] ): '''simple docstring''' UpperCAmelCase_ : str = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' ) dataset.map(cache_file_name=__snake_case ) return filename # FILE_CONTENT + files __UpperCAmelCase = '\\n Text data.\n Second line of data.' @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt' UpperCAmelCase_ : Tuple = FILE_CONTENT with open(__snake_case , 'w' ) as f: f.write(__snake_case ) return filename @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[str] ): '''simple docstring''' import bza UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2' UpperCAmelCase_ : str = bytes(__snake_case , 'utf-8' ) with bza.open(__snake_case , 'wb' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Any ): '''simple docstring''' import gzip UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' ) UpperCAmelCase_ : Dict = bytes(__snake_case , 'utf-8' ) with gzip.open(__snake_case , 'wb' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Dict ): '''simple docstring''' if datasets.config.LZ4_AVAILABLE: import lza.frame UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4' UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' ) with lza.frame.open(__snake_case , 'wb' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Tuple , __snake_case : List[Any] ): '''simple docstring''' if datasets.config.PY7ZR_AVAILABLE: import pyazr UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z' with pyazr.SevenZipFile(__snake_case , 'w' ) as archive: archive.write(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any] ): '''simple docstring''' import tarfile UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.tar' with tarfile.TarFile(__snake_case , 'w' ) as f: f.add(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str ): '''simple docstring''' import lzma UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz' UpperCAmelCase_ : Any = 
bytes(__snake_case , 'utf-8' ) with lzma.open(__snake_case , 'wb' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ): '''simple docstring''' import zipfile UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] ): '''simple docstring''' if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'file.txt.zst' UpperCAmelCase_ : List[str] = bytes(__snake_case , 'utf-8' ) with zstd.open(__snake_case , 'wb' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Any ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml' UpperCAmelCase_ : List[Any] = textwrap.dedent( '\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' ) with open(__snake_case , 'w' ) as f: f.write(__snake_case ) return filename __UpperCAmelCase = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __UpperCAmelCase = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __UpperCAmelCase = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __UpperCAmelCase = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __UpperCAmelCase = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope='session' ) def lowercase__ ( ): '''simple docstring''' return DATA_DICT_OF_LISTS @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : int ): '''simple docstring''' UpperCAmelCase_ : List[Any] = datasets.Dataset.from_dict(__snake_case ) UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' ) dataset.map(cache_file_name=__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' ) with contextlib.closing(sqlitea.connect(__snake_case ) ) as con: UpperCAmelCase_ : List[Any] = con.cursor() cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' ) for item in DATA: cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope='session' ) def lowercase__ ( 
__snake_case : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' ) with open(__snake_case , 'w' , newline='' ) as f: UpperCAmelCase_ : Tuple = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] ) writer.writeheader() for item in DATA: writer.writerow(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' ) with open(__snake_case , 'w' , newline='' ) as f: UpperCAmelCase_ : Optional[Any] = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] ) writer.writeheader() for item in DATA: writer.writerow(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str , __snake_case : Any ): '''simple docstring''' import bza UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2' with open(__snake_case , 'rb' ) as f: UpperCAmelCase_ : int = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(__snake_case , 'wb' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple ): '''simple docstring''' UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) ) f.write(__snake_case , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Tuple , __snake_case : int , __snake_case : str ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) ) f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' ) UpperCAmelCase_ : Dict = pa.schema( { 'col_1': pa.string(), 'col_2': pa.intaa(), 'col_3': pa.floataa(), } ) with open(__snake_case , 'wb' ) as f: UpperCAmelCase_ : List[Any] = pq.ParquetWriter(__snake_case , schema=__snake_case ) UpperCAmelCase_ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__snake_case ) )] for k in DATA[0]} , schema=__snake_case ) writer.write_table(__snake_case ) writer.close() return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' ) UpperCAmelCase_ : Optional[int] = {'data': DATA} with open(__snake_case , 'w' ) as f: json.dump(__snake_case , __snake_case ) return path 
@pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' ) UpperCAmelCase_ : Tuple = {'data': DATA_DICT_OF_LISTS} with open(__snake_case , 'w' ) as f: json.dump(__snake_case , __snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' ) with open(__snake_case , 'w' ) as f: for item in DATA: f.write(json.dumps(__snake_case ) + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' ) with open(__snake_case , 'w' ) as f: for item in DATA: f.write(json.dumps(__snake_case ) + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : int ): '''simple docstring''' UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' ) with open(__snake_case , 'w' ) as f: for item in DATA_312: f.write(json.dumps(__snake_case ) + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' ) with open(__snake_case , 'w' ) as f: for item in DATA_STR: f.write(json.dumps(__snake_case ) + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Dict , __snake_case : Dict ): '''simple docstring''' import gzip UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' ) with open(__snake_case , 'rb' ) as orig_file: with gzip.open(__snake_case , 'wb' ) as zipped_file: zipped_file.writelines(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : int , __snake_case : Any ): '''simple docstring''' import gzip UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' ) with open(__snake_case , 'rb' ) as orig_file: with gzip.open(__snake_case , 'wb' ) as zipped_file: zipped_file.writelines(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ): '''simple docstring''' UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) ) f.write(__snake_case , 
arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar' with tarfile.TarFile(__snake_case , 'w' ) as f: f.add(__snake_case , arcname=os.path.basename(__snake_case ) ) f.add(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar' with tarfile.TarFile(__snake_case , 'w' ) as f: f.add(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : Any = ['0', '1', '2', '3'] UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' ) with open(__snake_case , 'w' ) as f: for item in data: f.write(item + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3'] UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' ) with open(__snake_case , 'w' ) as f: for item in data: f.write(item + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' UpperCAmelCase_ : Dict = ['0', '1', '2', '3'] UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc' with open(__snake_case , 'w' ) as f: for item in data: f.write(item + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Dict , __snake_case : str , __snake_case : Any ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) ) f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename('unsupported.ext' ) ) f.write(__snake_case , arcname=os.path.basename('unsupported_2.ext' ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Dict ): '''simple docstring''' UpperCAmelCase_ : Tuple = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] ) UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' ) with 
open(__snake_case , 'w' , encoding='utf-8' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( ): '''simple docstring''' return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' ) @pytest.fixture(scope='session' ) def lowercase__ ( ): '''simple docstring''' return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' ) @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str , __snake_case : List[str] ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) f.write(__snake_case , arcname=os.path.basename(__snake_case ).replace('.jpg' , '2.jpg' ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Any ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data_dir' ) (data_dir / "subdir").mkdir() with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f: f.write('foo\n' * 10 ) with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f: f.write('bar\n' * 10 ) # hidden file with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f: f.write('bar\n' * 10 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f: f.write('foo\n' * 10 ) with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f: f.write('bar\n' * 10 ) return data_dir
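
# A minimal, fixture-free sketch of the pattern the fixtures above follow:
# materialize a small file under a temporary directory and return its path.
# The helper name `make_text_zip` is illustrative, not part of the test suite.
import os
import tempfile
import zipfile


def make_text_zip(text="hello"):
    tmp_dir = tempfile.mkdtemp()
    txt_path = os.path.join(tmp_dir, "file.txt")
    with open(txt_path, "w") as f:
        f.write(text)
    zip_path = os.path.join(tmp_dir, "file.txt.zip")
    with zipfile.ZipFile(zip_path, "w") as zf:
        # same arcname convention as the fixtures above
        zf.write(txt_path, arcname=os.path.basename(txt_path))
    return zip_path


print(make_text_zip())  # e.g. /tmp/.../file.txt.zip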
import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'): __UpperCAmelCase = True from torch.cuda.amp import autocast __UpperCAmelCase = logging.getLogger(__name__) @dataclass class lowerCamelCase : '''simple docstring''' _snake_case : str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) _snake_case : Optional[str] = field( default=_snake_case , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) _snake_case : Optional[bool] = field( default=_snake_case , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) _snake_case : Optional[bool] = field( default=_snake_case , metadata={'''help''': '''Whether to log verbose messages or not.'''} , ) _snake_case : Optional[float] = field( default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} ) _snake_case : Optional[float] = field( default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} ) _snake_case : Optional[float] = field( default=0.999995 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} ) def lowercase__ ( __snake_case : ModelArguments , __snake_case : TrainingArguments ): '''simple docstring''' logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) UpperCAmelCase_ : List[str] = logging.WARNING if model_args.verbose_logging: UpperCAmelCase_ : Any = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): UpperCAmelCase_ : Any = logging.INFO logger.setLevel(__snake_case ) @dataclass class lowerCamelCase : '''simple docstring''' _snake_case : str = field( default=_snake_case , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) _snake_case : Optional[str] = field( default=_snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) _snake_case : Optional[str] = field( default='''train''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\'''' } , ) _snake_case : Optional[str] = field( default='''validation''' , metadata={ '''help''': ( '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\'''' ) } , ) _snake_case : Optional[str] = field( default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. 
Defaults to \'file\''''} , ) _snake_case : bool = field( default=_snake_case , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) _snake_case : Optional[int] = field( default=1 , metadata={ '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split''' } , ) _snake_case : Optional[int] = field( default=_snake_case , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) _snake_case : Optional[float] = field( default=20.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} ) @dataclass class lowerCamelCase : '''simple docstring''' _snake_case : WavaVecaForPreTraining _snake_case : WavaVecaFeatureExtractor _snake_case : Union[bool, str] = "longest" _snake_case : Optional[int] = None _snake_case : Optional[int] = None def __call__( self , _UpperCamelCase ) -> Dict[str, torch.Tensor]: # reformat list to dict and set to pytorch format UpperCAmelCase_ : int = self.feature_extractor.pad( _UpperCamelCase , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , ) UpperCAmelCase_ : Any = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1] ) UpperCAmelCase_ : Union[str, Any] = batch['input_values'].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula UpperCAmelCase_ : Tuple = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1 ) ).to( torch.long ) UpperCAmelCase_ : str = torch.zeros( (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['input_values'].device ) # these two operations makes sure that all values # before the output lengths indices are attended to UpperCAmelCase_ : Union[str, Any] = 1 UpperCAmelCase_ : Dict = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices UpperCAmelCase_ : int = _compute_mask_indices( (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=_UpperCamelCase , min_masks=2 , ) return batch class lowerCamelCase (_snake_case ): '''simple docstring''' def __init__( self , *_UpperCamelCase , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=1.0 , **_UpperCamelCase ) -> Dict: super().__init__(*_UpperCamelCase , **_UpperCamelCase ) UpperCAmelCase_ : List[Any] = 0 UpperCAmelCase_ : Any = max_gumbel_temp UpperCAmelCase_ : int = min_gumbel_temp UpperCAmelCase_ : Dict = gumbel_temp_decay def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> torch.Tensor: model.train() UpperCAmelCase_ : int = self._prepare_inputs(_UpperCamelCase ) if self.use_amp: with autocast(): UpperCAmelCase_ : Union[str, Any] = self.compute_loss(_UpperCamelCase , _UpperCamelCase ) else: UpperCAmelCase_ : Any = self.compute_loss(_UpperCamelCase , _UpperCamelCase ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": UpperCAmelCase_ : List[Any] = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": UpperCAmelCase_ : Optional[Any] = loss.sum() / (inputs['mask_time_indices']).sum() else: raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. 
Choose one of ['mean', 'sum']" ) if self.args.gradient_accumulation_steps > 1: UpperCAmelCase_ : int = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(_UpperCamelCase ).backward() elif self.use_apex: with amp.scale_loss(_UpperCamelCase , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(_UpperCamelCase ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) return loss.detach() def lowercase__ ( ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = parser.parse_args_into_dataclasses() configure_logger(__snake_case , __snake_case ) # Downloading and loading a dataset from the hub. UpperCAmelCase_ : Optional[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" UpperCAmelCase_ : Tuple = DatasetDict() UpperCAmelCase_ : List[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , ) UpperCAmelCase_ : Any = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" UpperCAmelCase_ : Optional[int] = DatasetDict() UpperCAmelCase_ : List[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split='validation' , cache_dir=model_args.cache_dir , ) UpperCAmelCase_ : Union[str, Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported UpperCAmelCase_ : int = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=__snake_case ) def prepare_dataset(__snake_case : Dict ): # check that all files have the correct sampling rate UpperCAmelCase_ , UpperCAmelCase_ : Any = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays UpperCAmelCase_ : List[Any] = datasets.map( __snake_case , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['train'].column_names ) # filter audio files that are too long UpperCAmelCase_ : str = vectorized_datasets.filter( lambda __snake_case : len(data['speech'] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(__snake_case : Union[str, Any] ): return feature_extractor(batch['speech'] , sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` UpperCAmelCase_ : Tuple = vectorized_datasets.map( __snake_case , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not 
data_args.overwrite_cache , remove_columns=vectorized_datasets['train'].column_names , ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 UpperCAmelCase_ : Tuple = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( 'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and' ' ``config.feat_extract_norm=\'layer\'' ) UpperCAmelCase_ : Optional[Any] = WavaVecaForPreTraining(__snake_case ) UpperCAmelCase_ : Optional[int] = DataCollatorForWavaVecaPretraining(model=__snake_case , feature_extractor=__snake_case ) UpperCAmelCase_ : Optional[int] = WavaVecaPreTrainer( model=__snake_case , data_collator=__snake_case , args=__snake_case , train_dataset=vectorized_datasets['train'] , eval_dataset=vectorized_datasets['validation'] , tokenizer=__snake_case , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
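
# A standalone sketch of the gumbel-temperature schedule applied by the trainer
# above: multiplicative decay per update step, clamped at the minimum. The
# default values (2.0, 0.5, 0.999995) are taken from the ModelArguments fields;
# the function name is illustrative.
def gumbel_temperature(step, max_temp=2.0, min_temp=0.5, decay=0.999995):
    # same formula as the set_gumbel_temperature call in training_step
    return max(max_temp * decay**step, min_temp)


for step in (0, 100_000, 500_000):
    print(step, round(gumbel_temperature(step), 4))  # 2.0, then decaying toward 0.5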
from __future__ import annotations


def lowercase__ ( __snake_case : tuple[int, int] , __snake_case : int ):
    '''simple docstring'''
    UpperCAmelCase_ , UpperCAmelCase_ : Tuple = position
    UpperCAmelCase_ : str = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    UpperCAmelCase_ : Optional[Any] = []
    for position in positions:
        UpperCAmelCase_ , UpperCAmelCase_ : Tuple = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(__snake_case )
    return permissible_positions


def lowercase__ ( __snake_case : list[list[int]] ):
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row )


def lowercase__ ( __snake_case : list[list[int]] , __snake_case : tuple[int, int] , __snake_case : int ):
    '''simple docstring'''
    if is_complete(__snake_case ):
        return True
    for position in get_valid_pos(__snake_case , len(__snake_case ) ):
        UpperCAmelCase_ , UpperCAmelCase_ : Any = position
        if board[y][x] == 0:
            UpperCAmelCase_ : Optional[Any] = curr + 1
            if open_knight_tour_helper(__snake_case , __snake_case , curr + 1 ):
                return True
            UpperCAmelCase_ : List[Any] = 0
    return False


def lowercase__ ( __snake_case : int ):
    '''simple docstring'''
    UpperCAmelCase_ : str = [[0 for i in range(__snake_case )] for j in range(__snake_case )]
    for i in range(__snake_case ):
        for j in range(__snake_case ):
            UpperCAmelCase_ : Optional[Any] = 1
            if open_knight_tour_helper(__snake_case , (i, j) , 1 ):
                return board
            UpperCAmelCase_ : List[Any] = 0
    UpperCAmelCase_ : List[str] = F"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(__snake_case )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
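
# A self-contained sketch of the move generator above (renamed `knight_moves`
# for readability; the logic mirrors the first function definition):
def knight_moves(position, n):
    y, x = position
    candidates = [
        (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2),
        (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1),
    ]
    # keep only on-board squares
    return [(yy, xx) for yy, xx in candidates if 0 <= yy < n and 0 <= xx < n]


print(knight_moves((0, 0), 5))  # [(1, 2), (2, 1)]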
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def lowercase__ ( __snake_case : Features ):
    '''simple docstring'''
    UpperCAmelCase_ : Dict = np.inf

    def set_batch_size(__snake_case : FeatureType ) -> None:
        nonlocal batch_size
        if isinstance(__snake_case , __snake_case ):
            UpperCAmelCase_ : Optional[Any] = min(__snake_case , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(__snake_case , __snake_case ):
            UpperCAmelCase_ : Optional[int] = min(__snake_case , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(__snake_case , __snake_case ) and feature.dtype == "binary":
            UpperCAmelCase_ : Dict = min(__snake_case , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )

    _visit(__snake_case , __snake_case )
    return None if batch_size is np.inf else batch_size


class lowerCamelCase (_snake_case ):
    '''simple docstring'''

    def __init__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = None , **_UpperCamelCase , ) -> str:
        super().__init__(
            _UpperCamelCase , split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , num_proc=_UpperCamelCase , **_UpperCamelCase , )
        UpperCAmelCase_ : Tuple = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase ) else {self.split: path_or_paths}
        UpperCAmelCase_ : Optional[Any] = _PACKAGED_DATASETS_MODULES['parquet'][1]
        UpperCAmelCase_ : str = Parquet(
            cache_dir=_UpperCamelCase , data_files=_UpperCamelCase , features=_UpperCamelCase , hash=_UpperCamelCase , **_UpperCamelCase , )

    def __UpperCAmelCase ( self ) -> Dict:
        # Build iterable dataset
        if self.streaming:
            UpperCAmelCase_ : int = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            UpperCAmelCase_ : str = None
            UpperCAmelCase_ : Optional[int] = None
            UpperCAmelCase_ : Any = None
            UpperCAmelCase_ : Optional[Any] = None
            self.builder.download_and_prepare(
                download_config=_UpperCamelCase , download_mode=_UpperCamelCase , verification_mode=_UpperCamelCase , base_path=_UpperCamelCase , num_proc=self.num_proc , )
            UpperCAmelCase_ : List[str] = self.builder.as_dataset(
                split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory )
        return dataset


class lowerCamelCase :
    '''simple docstring'''

    def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> Union[str, Any]:
        UpperCAmelCase_ : Dict = dataset
        UpperCAmelCase_ : Dict = path_or_buf
        UpperCAmelCase_ : Dict = batch_size or get_writer_batch_size(dataset.features )
        UpperCAmelCase_ : Any = parquet_writer_kwargs

    def __UpperCAmelCase ( self ) -> int:
        UpperCAmelCase_ : Optional[Any] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , 'wb+' ) as buffer:
                UpperCAmelCase_ : Optional[int] = self._write(file_obj=_UpperCamelCase , batch_size=_UpperCamelCase , **self.parquet_writer_kwargs )
        else:
            UpperCAmelCase_ : Any = self._write(file_obj=self.path_or_buf , batch_size=_UpperCamelCase , **self.parquet_writer_kwargs )
        return written

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ) -> int:
        UpperCAmelCase_ : Any = 0
        UpperCAmelCase_ : Dict = parquet_writer_kwargs.pop('path_or_buf' , _UpperCamelCase )
        UpperCAmelCase_ : int = self.dataset.features.arrow_schema
        UpperCAmelCase_ : List[str] = pq.ParquetWriter(_UpperCamelCase , schema=_UpperCamelCase , **_UpperCamelCase )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , _UpperCamelCase ) ,
            unit='ba' ,
            disable=not logging.is_progress_bar_enabled() ,
            desc='Creating parquet from Arrow format' ,
        ):
            UpperCAmelCase_ : Any = query_table(
                table=self.dataset._data ,
                key=slice(_UpperCamelCase , offset + batch_size ) ,
                indices=self.dataset._indices if self.dataset._indices is not None else None ,
            )
            writer.write_table(_UpperCamelCase )
            written += batch.nbytes
        writer.close()
        return written
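
# A minimal round trip through the public API these classes back
# (`Dataset.to_parquet` / `Dataset.from_parquet`); the file name is illustrative:
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
ds.to_parquet("roundtrip.parquet")  # goes through the Parquet writer above
ds_roundtrip = Dataset.from_parquet("roundtrip.parquet")  # and the reader
assert ds_roundtrip.column_names == ds.column_names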
def lowercase__ ( __snake_case : int ):
    '''simple docstring'''
    UpperCAmelCase_ : list[list[int]] = [[0 for _ in range(__snake_case )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        UpperCAmelCase_ : Optional[Any] = 1
    for n in range(m + 1 ):
        for k in range(1 , __snake_case ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            __UpperCAmelCase = int(input('Enter a number: ').strip())
            print(partition(n))
        except ValueError:
            print('Please enter a number.')
    else:
        try:
            __UpperCAmelCase = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('Please pass a number.')
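
# Worked example: the recurrence above computes p(m), the number of integer
# partitions of m (e.g. 4 = 4 = 3+1 = 2+2 = 2+1+1 = 1+1+1+1, so p(4) = 5).
# `partition` is the name the __main__ block above uses for this function.
for m in (1, 2, 3, 4, 5):
    print(m, partition(m))  # 1, 2, 3, 5, 7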
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) __UpperCAmelCase = { 'sample_size': 32, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 2, 'num_class_embeds': 1000, 'block_out_channels': [32, 64], 'attention_head_dim': 8, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'scale_shift', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } __UpperCAmelCase = { 'sample_size': 64, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 3, 'num_class_embeds': 1000, 'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4], 'attention_head_dim': 64, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'AttnUpBlock2D', 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'scale_shift', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } __UpperCAmelCase = { 'sample_size': 256, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 2, 'num_class_embeds': None, 'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], 'attention_head_dim': 64, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'AttnUpBlock2D', 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'default', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } __UpperCAmelCase = { 'num_train_timesteps': 40, 'sigma_min': 0.0_0_2, 'sigma_max': 8_0.0, } __UpperCAmelCase = { 'num_train_timesteps': 201, 'sigma_min': 0.0_0_2, 'sigma_max': 8_0.0, } __UpperCAmelCase = { 'num_train_timesteps': 151, 'sigma_min': 0.0_0_2, 'sigma_max': 8_0.0, } def lowercase__ ( __snake_case : int ): '''simple docstring''' if isinstance(__snake_case , __snake_case ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('boolean value expected' ) def lowercase__ ( __snake_case : Any , __snake_case : Dict , __snake_case : List[Any] , __snake_case : str , __snake_case : Dict=False ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = checkpoint[F"{old_prefix}.in_layers.0.weight"] UpperCAmelCase_ : str = checkpoint[F"{old_prefix}.in_layers.0.bias"] UpperCAmelCase_ : Dict = checkpoint[F"{old_prefix}.in_layers.2.weight"] UpperCAmelCase_ : Optional[Any] = checkpoint[F"{old_prefix}.in_layers.2.bias"] UpperCAmelCase_ : int = checkpoint[F"{old_prefix}.emb_layers.1.weight"] UpperCAmelCase_ : Any = checkpoint[F"{old_prefix}.emb_layers.1.bias"] UpperCAmelCase_ : List[str] = checkpoint[F"{old_prefix}.out_layers.0.weight"] UpperCAmelCase_ : Tuple = checkpoint[F"{old_prefix}.out_layers.0.bias"] UpperCAmelCase_ : Dict = checkpoint[F"{old_prefix}.out_layers.3.weight"] UpperCAmelCase_ : Optional[int] = checkpoint[F"{old_prefix}.out_layers.3.bias"] if has_skip: UpperCAmelCase_ : Tuple = checkpoint[F"{old_prefix}.skip_connection.weight"] UpperCAmelCase_ : List[str] = checkpoint[F"{old_prefix}.skip_connection.bias"] return new_checkpoint def lowercase__ ( __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : int , __snake_case : Dict=None ): '''simple 
docstring''' UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = checkpoint[F"{old_prefix}.qkv.weight"].chunk(3 , dim=0 ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = checkpoint[F"{old_prefix}.qkv.bias"].chunk(3 , dim=0 ) UpperCAmelCase_ : Dict = checkpoint[F"{old_prefix}.norm.weight"] UpperCAmelCase_ : int = checkpoint[F"{old_prefix}.norm.bias"] UpperCAmelCase_ : Tuple = weight_q.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ : str = bias_q.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ : Any = weight_k.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ : List[Any] = bias_k.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ : Dict = weight_v.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ : Optional[Any] = bias_v.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ : Any = ( checkpoint[F"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 ) ) UpperCAmelCase_ : Optional[int] = checkpoint[F"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def lowercase__ ( __snake_case : str , __snake_case : Any ): '''simple docstring''' UpperCAmelCase_ : Any = torch.load(__snake_case , map_location='cpu' ) UpperCAmelCase_ : int = {} UpperCAmelCase_ : Optional[int] = checkpoint['time_embed.0.weight'] UpperCAmelCase_ : str = checkpoint['time_embed.0.bias'] UpperCAmelCase_ : str = checkpoint['time_embed.2.weight'] UpperCAmelCase_ : str = checkpoint['time_embed.2.bias'] if unet_config["num_class_embeds"] is not None: UpperCAmelCase_ : Any = checkpoint['label_emb.weight'] UpperCAmelCase_ : Dict = checkpoint['input_blocks.0.0.weight'] UpperCAmelCase_ : List[str] = checkpoint['input_blocks.0.0.bias'] UpperCAmelCase_ : List[Any] = unet_config['down_block_types'] UpperCAmelCase_ : Any = unet_config['layers_per_block'] UpperCAmelCase_ : Optional[Any] = unet_config['attention_head_dim'] UpperCAmelCase_ : Union[str, Any] = unet_config['block_out_channels'] UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : str = channels_list[0] for i, layer_type in enumerate(__snake_case ): UpperCAmelCase_ : List[Any] = channels_list[i] UpperCAmelCase_ : Union[str, Any] = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(__snake_case ): UpperCAmelCase_ : Tuple = F"down_blocks.{i}.resnets.{j}" UpperCAmelCase_ : Dict = F"input_blocks.{current_layer}.0" UpperCAmelCase_ : Union[str, Any] = True if j == 0 and downsample_block_has_skip else False UpperCAmelCase_ : Optional[int] = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case , has_skip=__snake_case ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(__snake_case ): UpperCAmelCase_ : Optional[int] = F"down_blocks.{i}.resnets.{j}" UpperCAmelCase_ : str = F"input_blocks.{current_layer}.0" UpperCAmelCase_ : Dict = True if j == 0 and downsample_block_has_skip else False UpperCAmelCase_ : int = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case , has_skip=__snake_case ) UpperCAmelCase_ : Dict = F"down_blocks.{i}.attentions.{j}" UpperCAmelCase_ : List[Any] = F"input_blocks.{current_layer}.1" UpperCAmelCase_ : List[str] = convert_attention( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) current_layer += 1 if i != len(__snake_case ) - 1: UpperCAmelCase_ : Dict = F"down_blocks.{i}.downsamplers.0" UpperCAmelCase_ : Optional[Any] = F"input_blocks.{current_layer}.0" UpperCAmelCase_ : Any = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case ) current_layer += 1 UpperCAmelCase_ : Optional[int] = current_channels # hardcoded the 
mid-block for now UpperCAmelCase_ : int = 'mid_block.resnets.0' UpperCAmelCase_ : Optional[Any] = 'middle_block.0' UpperCAmelCase_ : Union[str, Any] = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case ) UpperCAmelCase_ : Optional[Any] = 'mid_block.attentions.0' UpperCAmelCase_ : Union[str, Any] = 'middle_block.1' UpperCAmelCase_ : List[Any] = convert_attention(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) UpperCAmelCase_ : int = 'mid_block.resnets.1' UpperCAmelCase_ : Dict = 'middle_block.2' UpperCAmelCase_ : str = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case ) UpperCAmelCase_ : int = 0 UpperCAmelCase_ : Union[str, Any] = unet_config['up_block_types'] for i, layer_type in enumerate(__snake_case ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): UpperCAmelCase_ : Dict = F"up_blocks.{i}.resnets.{j}" UpperCAmelCase_ : Dict = F"output_blocks.{current_layer}.0" UpperCAmelCase_ : Dict = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case , has_skip=__snake_case ) current_layer += 1 if i != len(__snake_case ) - 1: UpperCAmelCase_ : Dict = F"up_blocks.{i}.upsamplers.0" UpperCAmelCase_ : Optional[Any] = F"output_blocks.{current_layer-1}.1" UpperCAmelCase_ : int = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): UpperCAmelCase_ : Optional[int] = F"up_blocks.{i}.resnets.{j}" UpperCAmelCase_ : List[Any] = F"output_blocks.{current_layer}.0" UpperCAmelCase_ : int = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case , has_skip=__snake_case ) UpperCAmelCase_ : List[str] = F"up_blocks.{i}.attentions.{j}" UpperCAmelCase_ : Optional[Any] = F"output_blocks.{current_layer}.1" UpperCAmelCase_ : Optional[Any] = convert_attention( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) current_layer += 1 if i != len(__snake_case ) - 1: UpperCAmelCase_ : List[str] = F"up_blocks.{i}.upsamplers.0" UpperCAmelCase_ : List[str] = F"output_blocks.{current_layer-1}.2" UpperCAmelCase_ : str = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case ) UpperCAmelCase_ : Optional[Any] = checkpoint['out.0.weight'] UpperCAmelCase_ : str = checkpoint['out.0.bias'] UpperCAmelCase_ : List[str] = checkpoint['out.2.weight'] UpperCAmelCase_ : str = checkpoint['out.2.bias'] return new_checkpoint if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.') parser.add_argument( '--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.' 
) parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.') __UpperCAmelCase = parser.parse_args() __UpperCAmelCase = strabool(args.class_cond) __UpperCAmelCase = os.path.basename(args.unet_path) print(F'Checkpoint: {ckpt_name}') # Get U-Net config if "imagenet64" in ckpt_name: __UpperCAmelCase = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __UpperCAmelCase = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: __UpperCAmelCase = TEST_UNET_CONFIG else: raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.') if not args.class_cond: __UpperCAmelCase = None __UpperCAmelCase = con_pt_to_diffuser(args.unet_path, unet_config) __UpperCAmelCase = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: __UpperCAmelCase = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: __UpperCAmelCase = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __UpperCAmelCase = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.') __UpperCAmelCase = CMStochasticIterativeScheduler(**scheduler_config) __UpperCAmelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
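
# A standalone rewrite of the boolean coercion helper used for --class_cond
# (the upstream name is `strabool`, as referenced in the __main__ block above);
# `str_to_bool` and the sample inputs are illustrative:
import argparse


def str_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    if v.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("boolean value expected")


print(str_to_bool("yes"), str_to_bool("0"))  # True False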
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

__UpperCAmelCase = logging.get_logger(__name__)


@add_end_docstrings(_snake_case )
class lowerCamelCase (_snake_case ):
    '''simple docstring'''

    def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
        super().__init__(*_UpperCamelCase , **_UpperCamelCase )
        self.check_model_type(_UpperCamelCase )

    def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> List[Any]:
        UpperCAmelCase_ , UpperCAmelCase_ : Tuple = {}, {}
        if padding is not None:
            UpperCAmelCase_ : List[str] = padding
        if truncation is not None:
            UpperCAmelCase_ : Tuple = truncation
        if top_k is not None:
            UpperCAmelCase_ : Dict = top_k
        return preprocess_params, {}, postprocess_params

    def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> int:
        if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
            UpperCAmelCase_ : Optional[Any] = {'image': image, 'question': question}
        else:
            UpperCAmelCase_ : List[str] = image
        UpperCAmelCase_ : Optional[Any] = super().__call__(_UpperCamelCase , **_UpperCamelCase )
        return results

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ) -> Optional[Any]:
        UpperCAmelCase_ : List[Any] = load_image(inputs['image'] )
        UpperCAmelCase_ : Dict = self.tokenizer(
            inputs['question'] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase )
        UpperCAmelCase_ : int = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework )
        model_inputs.update(_UpperCamelCase )
        return model_inputs

    def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
        UpperCAmelCase_ : Any = self.model(**_UpperCamelCase )
        return model_outputs

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> str:
        if top_k > self.model.config.num_labels:
            UpperCAmelCase_ : Union[str, Any] = self.model.config.num_labels
        if self.framework == "pt":
            UpperCAmelCase_ : List[str] = model_outputs.logits.sigmoid()[0]
            UpperCAmelCase_ , UpperCAmelCase_ : str = probs.topk(_UpperCamelCase )
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )
        UpperCAmelCase_ : Optional[Any] = scores.tolist()
        UpperCAmelCase_ : Tuple = ids.tolist()
        return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
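
# Typical use of this pipeline through the public entry point; the checkpoint,
# image URL, and question are illustrative, any VQA model on the Hub would do:
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(
    image="https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png",
    question="What is she wearing?",
    top_k=2,
)
print(preds)  # [{'score': ..., 'answer': ...}, {'score': ..., 'answer': ...}]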
from __future__ import annotations

__UpperCAmelCase = list[list[int]]

# assigning initial values to the grid
__UpperCAmelCase = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
__UpperCAmelCase = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def lowercase__ ( __snake_case : Matrix , __snake_case : int , __snake_case : int , __snake_case : int ):
    '''simple docstring'''
    for i in range(9 ):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3 ):
        for j in range(3 ):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def lowercase__ ( __snake_case : Matrix ):
    '''simple docstring'''
    for i in range(9 ):
        for j in range(9 ):
            if grid[i][j] == 0:
                return i, j
    return None


def lowercase__ ( __snake_case : Matrix ):
    '''simple docstring'''
    if location := find_empty_location(__snake_case ):
        UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(__snake_case , __snake_case , __snake_case , __snake_case ):
            UpperCAmelCase_ : Any = digit
            if sudoku(__snake_case ) is not None:
                return grid
            UpperCAmelCase_ : Tuple = 0
    return None


def lowercase__ ( __snake_case : Matrix ):
    '''simple docstring'''
    for row in grid:
        for cell in row:
            print(__snake_case , end=' ' )
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print('\nExample grid:\n' + '=' * 20)
        print_solution(example_grid)
        print('\nExample grid solution:')
        __UpperCAmelCase = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print('Cannot find a solution.')
import os

# Precomputes a list of the 100 first triangular numbers
__UpperCAmelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def lowercase__ ( ):
    '''simple docstring'''
    UpperCAmelCase_ : Any = os.path.dirname(os.path.realpath(__snake_case ) )
    UpperCAmelCase_ : Optional[Any] = os.path.join(__snake_case , 'words.txt' )
    UpperCAmelCase_ : Union[str, Any] = ''
    with open(__snake_case ) as f:
        UpperCAmelCase_ : List[Any] = f.readline()
    UpperCAmelCase_ : Optional[int] = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
    UpperCAmelCase_ : Optional[int] = [
        word
        for word in [sum(ord(__snake_case ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(__snake_case )


if __name__ == "__main__":
    print(solution())
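
# Worked example of the word-value test above: a word counts when the sum of
# its letters' alphabetical positions is a triangular number t_n = n(n+1)/2.
word = "SKY"
value = sum(ord(ch) - 64 for ch in word)  # 19 + 11 + 25 = 55
print(value, value == 10 * 11 // 2)  # 55 True -> "SKY" is a triangle word (t_10)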
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging

__UpperCAmelCase = logging.get_logger(__name__)

__UpperCAmelCase = {
    'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json',
    'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json',
}


class lowerCamelCase (_snake_case ):
    '''simple docstring'''

    _snake_case : Dict = '''xlnet'''
    _snake_case : Optional[int] = ['''mems''']
    _snake_case : List[str] = {
        '''n_token''': '''vocab_size''',  # Backward compatibility
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__( self , _UpperCamelCase=3_2_0_0_0 , _UpperCamelCase=1_0_2_4 , _UpperCamelCase=2_4 , _UpperCamelCase=1_6 , _UpperCamelCase=4_0_9_6 , _UpperCamelCase="gelu" , _UpperCamelCase=True , _UpperCamelCase="bi" , _UpperCamelCase=0.02 , _UpperCamelCase=1E-12 , _UpperCamelCase=0.1 , _UpperCamelCase=5_1_2 , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=-1 , _UpperCamelCase=False , _UpperCamelCase="last" , _UpperCamelCase=True , _UpperCamelCase="tanh" , _UpperCamelCase=0.1 , _UpperCamelCase=5 , _UpperCamelCase=5 , _UpperCamelCase=5 , _UpperCamelCase=1 , _UpperCamelCase=2 , **_UpperCamelCase , ) -> Any:
        UpperCAmelCase_ : int = vocab_size
        UpperCAmelCase_ : int = d_model
        UpperCAmelCase_ : Any = n_layer
        UpperCAmelCase_ : Tuple = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
        UpperCAmelCase_ : Dict = d_model // n_head
        UpperCAmelCase_ : int = ff_activation
        UpperCAmelCase_ : Tuple = d_inner
        UpperCAmelCase_ : Any = untie_r
        UpperCAmelCase_ : Optional[int] = attn_type
        UpperCAmelCase_ : Optional[int] = initializer_range
        UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
        UpperCAmelCase_ : int = dropout
        UpperCAmelCase_ : Optional[int] = mem_len
        UpperCAmelCase_ : str = reuse_len
        UpperCAmelCase_ : List[Any] = bi_data
        UpperCAmelCase_ : Tuple = clamp_len
        UpperCAmelCase_ : Dict = same_length
        UpperCAmelCase_ : int = summary_type
        UpperCAmelCase_ : Optional[Any] = summary_use_proj
        UpperCAmelCase_ : List[str] = summary_activation
        UpperCAmelCase_ : Dict = summary_last_dropout
        UpperCAmelCase_ : str = start_n_top
        UpperCAmelCase_ : str = end_n_top
        UpperCAmelCase_ : Any = bos_token_id
        UpperCAmelCase_ : Tuple = pad_token_id
        UpperCAmelCase_ : str = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.' , _UpperCamelCase , )
            UpperCAmelCase_ : Any = kwargs['use_cache']
        UpperCAmelCase_ : Union[str, Any] = use_mems_eval
        UpperCAmelCase_ : Union[str, Any] = use_mems_train
        super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )

    @property
    def __UpperCAmelCase ( self ) -> Tuple:
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit." )
        return -1

    @max_position_embeddings.setter
    def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit." )
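
# A minimal usage sketch of this config via its public class name
# (`XLNetConfig` in transformers); note d_head is derived, not passed:
from transformers import XLNetConfig

config = XLNetConfig(n_layer=12)  # other values keep the defaults above
print(config.d_model, config.n_head, config.d_head)  # 1024 16 64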
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem

__UpperCAmelCase = importlib.util.find_spec('s3fs') is not None

if _has_safs:
    from .safilesystem import SaFileSystem  # noqa: F401

__UpperCAmelCase = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def lowercase__ ( __snake_case : str ):
    '''simple docstring'''
    if "://" in dataset_path:
        UpperCAmelCase_ : int = dataset_path.split('://' )[1]
    return dataset_path


def lowercase__ ( __snake_case : fsspec.AbstractFileSystem ):
    '''simple docstring'''
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def lowercase__ ( __snake_case : fsspec.AbstractFileSystem , __snake_case : str , __snake_case : str ):
    '''simple docstring'''
    UpperCAmelCase_ : List[str] = not is_remote_filesystem(__snake_case )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(__snake_case ) , fs._strip_protocol(__snake_case ) )
    else:
        fs.mv(__snake_case , __snake_case , recursive=__snake_case )


def lowercase__ ( ):
    '''simple docstring'''
    if hasattr(fsspec.asyn , 'reset_lock' ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        UpperCAmelCase_ : Optional[Any] = None
        UpperCAmelCase_ : Union[str, Any] = None
        UpperCAmelCase_ : int = threading.Lock()
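
# A standalone sketch of the URI-stripping helper above (the upstream name is
# `extract_path_from_uri`; `extract_path` here is illustrative). The companion
# check above treats any filesystem whose protocol is not "file" as remote.
def extract_path(dataset_path):
    return dataset_path.split("://")[1] if "://" in dataset_path else dataset_path


print(extract_path("s3://bucket/dataset"))  # bucket/dataset
print(extract_path("relative/path"))  # relative/path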
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def lowercase__ ( __snake_case : Optional[int] ):  # picklable for multiprocessing
    '''simple docstring'''
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowercase__ ( ):
    '''simple docstring'''
    with parallel_backend('spark' ):
        assert ParallelBackendConfig.backend_name == "spark"
    UpperCAmelCase_ : str = [1, 2, 3]
    with pytest.raises(__snake_case ):
        with parallel_backend('unsupported backend' ):
            map_nested(__snake_case , __snake_case , num_proc=2 )
    with pytest.raises(__snake_case ):
        with parallel_backend('unsupported backend' ):
            map_nested(__snake_case , __snake_case , num_proc=-1 )


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def lowercase__ ( __snake_case : Dict ):
    '''simple docstring'''
    UpperCAmelCase_ : Dict = [1, 2]
    UpperCAmelCase_ : Any = {'a': 1, 'b': 2}
    UpperCAmelCase_ : Any = {'a': [1, 2], 'b': [3, 4]}
    UpperCAmelCase_ : Optional[Any] = {'a': {'1': 1}, 'b': 2}
    UpperCAmelCase_ : Optional[int] = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
    UpperCAmelCase_ : Tuple = [2, 3]
    UpperCAmelCase_ : Optional[int] = {'a': 2, 'b': 3}
    UpperCAmelCase_ : Optional[int] = {'a': [2, 3], 'b': [4, 5]}
    UpperCAmelCase_ : int = {'a': {'1': 2}, 'b': 3}
    UpperCAmelCase_ : str = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
    with parallel_backend('spark' ):
        assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
        assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
        assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
        assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
        assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
def lowercase__ ( __snake_case : list ):
    '''simple docstring'''
    for i in range(len(__snake_case ) - 1 , 0 , -1 ):
        UpperCAmelCase_ : Dict = False
        for j in range(__snake_case , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                UpperCAmelCase_ , UpperCAmelCase_ : Any = unsorted[j - 1], unsorted[j]
                UpperCAmelCase_ : int = True
        for j in range(__snake_case ):
            if unsorted[j] > unsorted[j + 1]:
                UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = unsorted[j + 1], unsorted[j]
                UpperCAmelCase_ : Any = True
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    __UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
    __UpperCAmelCase = [int(item) for item in user_input.split(',')]
    print(F'{cocktail_shaker_sort(unsorted) = }')
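
# Quick demonstration via the name the __main__ block above uses; each outer
# pass bubbles the maximum right and the minimum left, and the `swapped` flag
# exits early on already-sorted input:
print(cocktail_shaker_sort([4, 5, 2, 1, 2]))  # [1, 2, 2, 4, 5]
print(cocktail_shaker_sort([-4, 5, 0, 1, 2, 11]))  # [-4, 0, 1, 2, 5, 11]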
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

__UpperCAmelCase = {
    'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = ['VisionEncoderDecoderModel']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = ['TFVisionEncoderDecoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = ['FlaxVisionEncoderDecoderModel']

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
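
# A minimal sketch of the _LazyModule idea (not the real implementation): the
# module resolves attributes on first access instead of importing every
# framework backend up front, so importing the package stays cheap.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                # import the submodule lazily, then pull the attribute off it
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")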
from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n' def lowercase__ ( __snake_case : List[str] , __snake_case : int , __snake_case : Tuple=8 ): '''simple docstring''' UpperCAmelCase_ : Dict = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCAmelCase_ : List[Any] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def lowercase__ ( __snake_case : Any , __snake_case : int=512 , __snake_case : Dict=512 ): '''simple docstring''' UpperCAmelCase_ : Tuple = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) UpperCAmelCase_ : Dict = np.array(pil_image.convert('RGB' ) ) UpperCAmelCase_ : Any = arr.astype(np.floataa ) / 127.5 - 1 UpperCAmelCase_ : Dict = np.transpose(__snake_case , [2, 0, 1] ) UpperCAmelCase_ : List[str] = torch.from_numpy(__snake_case ).unsqueeze(0 ) return image class lowerCamelCase (_snake_case ): '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Union[str, Any]: super().__init__() self.register_modules( unet=_UpperCamelCase , scheduler=_UpperCamelCase , movq=_UpperCamelCase , ) UpperCAmelCase_ : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict: # get the original timestep using init_timestep UpperCAmelCase_ : Any = min(int(num_inference_steps * strength ) , _UpperCamelCase ) UpperCAmelCase_ : List[Any] = max(num_inference_steps - init_timestep , 0 ) UpperCAmelCase_ : str = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> Tuple: if not isinstance(_UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is 
{type(_UpperCamelCase )}" ) UpperCAmelCase_ : List[str] = image.to(device=_UpperCamelCase , dtype=_UpperCamelCase ) UpperCAmelCase_ : List[str] = batch_size * num_images_per_prompt if image.shape[1] == 4: UpperCAmelCase_ : List[str] = image else: if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(_UpperCamelCase )}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) elif isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : Any = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCamelCase ) ] UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase , dim=0 ) else: UpperCAmelCase_ : Union[str, Any] = self.movq.encode(_UpperCamelCase ).latent_dist.sample(_UpperCamelCase ) UpperCAmelCase_ : int = self.movq.config.scaling_factor * init_latents UpperCAmelCase_ : Optional[int] = torch.cat([init_latents] , dim=0 ) UpperCAmelCase_ : Tuple = init_latents.shape UpperCAmelCase_ : List[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase ) # get latents UpperCAmelCase_ : str = self.scheduler.add_noise(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = init_latents return latents def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Any: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) UpperCAmelCase_ : Optional[Any] = torch.device(f"cuda:{gpu_id}" ) UpperCAmelCase_ : Optional[Any] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]: if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' ) UpperCAmelCase_ : str = torch.device(f"cuda:{gpu_id}" ) if self.device.type != "cpu": self.to('cpu' , silence_dtype_warnings=_UpperCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase_ : Dict = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCAmelCase_ , UpperCAmelCase_ : Dict = cpu_offload_with_hook(_UpperCamelCase , _UpperCamelCase , prev_module_hook=_UpperCamelCase ) # We'll offload the last model manually. 
UpperCAmelCase_ : Any = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __UpperCAmelCase ( self ) -> Dict: if not hasattr(self.unet , '_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(_UpperCamelCase , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_UpperCamelCase ) def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 4.0 , _UpperCamelCase = 0.3 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> str: UpperCAmelCase_ : Any = self._execution_device UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0 if isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : str = torch.cat(_UpperCamelCase , dim=0 ) UpperCAmelCase_ : Optional[Any] = image_embeds.shape[0] if isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase_ : int = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 ) UpperCAmelCase_ : int = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 ) UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase ) if not isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : Tuple = [image] if not all(isinstance(_UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( f"Input is in incorrect format: {[type(_UpperCamelCase ) for i in image]}. 
Currently, we only support PIL image and pytorch tensor" ) UpperCAmelCase_ : str = torch.cat([prepare_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for i in image] , dim=0 ) UpperCAmelCase_ : Any = image.to(dtype=image_embeds.dtype , device=_UpperCamelCase ) UpperCAmelCase_ : List[str] = self.movq.encode(_UpperCamelCase )['latents'] UpperCAmelCase_ : List[Any] = latents.repeat_interleave(_UpperCamelCase , dim=0 ) self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt ) UpperCAmelCase_ , UpperCAmelCase_ : str = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor ) UpperCAmelCase_ : Dict = self.prepare_latents( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase ) for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase_ : str = {'image_embeds': image_embeds} UpperCAmelCase_ : Union[str, Any] = self.unet( sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0] if do_classifier_free_guidance: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 ) UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 ) UpperCAmelCase_ , UpperCAmelCase_ : str = variance_pred.chunk(2 ) UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , 'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): UpperCAmelCase_ , UpperCAmelCase_ : int = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ : List[str] = self.scheduler.step( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0] # post-processing UpperCAmelCase_ : Optional[Any] = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" ) if output_type in ["np", "pil"]: UpperCAmelCase_ : List[str] = image * 0.5 + 0.5 UpperCAmelCase_ : List[Any] = image.clamp(0 , 1 ) UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase_ : List[Any] = self.numpy_to_pil(_UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_UpperCamelCase )
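# A standalone numeric sketch of the classifier-free-guidance step inside the
# denoising loop above: the unconditional and text-conditioned predictions are
# stacked along the batch dimension, split, and recombined. The shapes and
# guidance scale here are illustrative assumptions.
import torch

guidance_scale = 4.0
noise_pred = torch.randn(2, 4, 8, 8)  # [uncond, text] stacked along dim 0
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided.shape)  # torch.Size([1, 4, 8, 8])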
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next divisor of n
        while n % i != 0:
            i += 1
        ans = i
        # strip out every power of that divisor
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
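# Quick sanity checks for the trial-division factorization above; the expected
# values follow directly from the factorizations in the comments.
assert solution(13195) == 29  # 13195 = 5 * 7 * 13 * 29
assert solution(17) == 17     # a prime is its own largest prime factor
assert solution(2) == 2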
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def lowercase__ ( __snake_case : List[Any] , __snake_case : List[str]=False ): '''simple docstring''' try: UpperCAmelCase_ : int = os.environ[key] except KeyError: # KEY isn't set, default to `default`. UpperCAmelCase_ : Optional[int] = default else: # KEY is set, convert it to True or False. try: UpperCAmelCase_ : List[Any] = strtobool(__snake_case ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"If set, {key} must be yes or no." ) return _value __UpperCAmelCase = parse_flag_from_env('RUN_SLOW', default=False) def lowercase__ ( __snake_case : int ): '''simple docstring''' return unittest.skip('Test was skipped' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__snake_case ) def lowercase__ ( __snake_case : List[str] ): '''simple docstring''' return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__snake_case ) def lowercase__ ( __snake_case : List[str] ): '''simple docstring''' return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(__snake_case ) def lowercase__ ( __snake_case : str ): '''simple docstring''' return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__snake_case ) def lowercase__ ( __snake_case : str ): '''simple docstring''' return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__snake_case ) def lowercase__ ( __snake_case : Dict ): '''simple docstring''' return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__snake_case ) def lowercase__ ( __snake_case : Dict ): '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(__snake_case ) def lowercase__ ( __snake_case : Optional[int] ): '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__snake_case ) def lowercase__ ( __snake_case : int ): '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__snake_case ) def lowercase__ ( __snake_case : Dict ): '''simple docstring''' return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return 
unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(__snake_case ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__snake_case ) def lowercase__ ( __snake_case : Dict=None , __snake_case : Dict=None ): '''simple docstring''' if test_case is None: return partial(__snake_case , version=__snake_case ) return unittest.skipUnless(is_torch_version('>=' , __snake_case ) , F"test requires torch version >= {version}" )(__snake_case ) def lowercase__ ( __snake_case : str ): '''simple docstring''' return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__snake_case ) def lowercase__ ( __snake_case : List[str] ): '''simple docstring''' return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__snake_case ) def lowercase__ ( __snake_case : str ): '''simple docstring''' return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__snake_case ) __UpperCAmelCase = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__snake_case ) class lowerCamelCase (unittest.TestCase ): '''simple docstring''' _snake_case : Union[str, Any] = True @classmethod def __UpperCAmelCase ( cls ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = tempfile.mkdtemp() @classmethod def __UpperCAmelCase ( cls ) -> List[str]: if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def __UpperCAmelCase ( self ) -> str: if self.clear_on_setup: for path in Path(self.tmpdir ).glob('**/*' ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(_UpperCamelCase ) class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ) -> Optional[int]: super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any: UpperCAmelCase_ : List[Any] = mocks if isinstance(_UpperCamelCase , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def lowercase__ ( __snake_case : int ): '''simple docstring''' UpperCAmelCase_ : int = AcceleratorState() UpperCAmelCase_ : str = tensor[None].clone().to(state.device ) UpperCAmelCase_ : List[str] = gather(__snake_case ).cpu() UpperCAmelCase_ : List[Any] = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , __snake_case ): return False return True class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any: UpperCAmelCase_ : str = returncode UpperCAmelCase_ : Optional[Any] = stdout UpperCAmelCase_ : Optional[Any] = stderr async def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Optional[int] ): '''simple docstring''' while True: UpperCAmelCase_ : Dict = await stream.readline() if line: callback(__snake_case ) else: break async def lowercase__ ( __snake_case : Optional[int] , __snake_case : Dict=None , __snake_case : str=None , __snake_case : Dict=None , __snake_case : List[str]=False , __snake_case : Optional[int]=False ): '''simple docstring''' if echo: print('\nRunning: ' , ' '.join(__snake_case ) ) UpperCAmelCase_ : Optional[Any] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__snake_case , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__snake_case , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) UpperCAmelCase_ : Any = [] UpperCAmelCase_ : str = [] def tee(__snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Optional[int]="" ): UpperCAmelCase_ : List[str] = line.decode('utf-8' ).rstrip() sink.append(__snake_case ) if not quiet: print(__snake_case , __snake_case , file=__snake_case ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda __snake_case : tee(__snake_case , __snake_case , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda __snake_case : tee(__snake_case , __snake_case , sys.stderr , label='stderr:' ) ) ), ] , timeout=__snake_case , ) return _RunOutput(await p.wait() , __snake_case , __snake_case ) def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[Any]=None , __snake_case : str=None , __snake_case : Tuple=180 , __snake_case : Dict=False , __snake_case : Optional[Any]=True ): '''simple docstring''' UpperCAmelCase_ : str = asyncio.get_event_loop() UpperCAmelCase_ : int = loop.run_until_complete( _stream_subprocess(__snake_case , env=__snake_case , stdin=__snake_case , timeout=__snake_case , quiet=__snake_case , echo=__snake_case ) ) UpperCAmelCase_ : int = ' '.join(__snake_case ) if result.returncode > 0: UpperCAmelCase_ : int = '\n'.join(result.stderr ) raise RuntimeError( F"'{cmd_str}' failed with returncode {result.returncode}\n\n" F"The combined stderr from workers follows:\n{stderr}" ) return result class lowerCamelCase (_snake_case ): '''simple docstring''' pass def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any]=False ): '''simple docstring''' try: UpperCAmelCase_ : List[Any] = subprocess.check_output(__snake_case , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(__snake_case , 'decode' ): UpperCAmelCase_ : str = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"Command `{' '.join(__snake_case )}` failed with the following error:\n\n{e.output.decode()}" ) from e
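# A self-contained sketch of the yes/no environment-flag parsing pattern used
# above (same strtobool semantics as the original); `DEMO_FLAG` and the helper
# name `parse_bool_env` are illustrative assumptions.
import os
from distutils.util import strtobool


def parse_bool_env(key, default=False):
    value = os.environ.get(key)
    if value is None:
        return default  # key unset: fall back to the caller's default
    try:
        return bool(strtobool(value))
    except ValueError:
        raise ValueError(f"If set, {key} must be yes or no.")


os.environ["DEMO_FLAG"] = "yes"
print(parse_bool_env("DEMO_FLAG"))                  # True
print(parse_bool_env("UNSET_FLAG", default=False))  # False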
# flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {} __UpperCAmelCase = {} __UpperCAmelCase = {} def lowercase__ ( __snake_case : type , __snake_case : Optional[str] , __snake_case : Optional[List[str]] = None , ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( F"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" ) UpperCAmelCase_ : Dict = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( F"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" ) UpperCAmelCase_ : int = format_type def lowercase__ ( __snake_case : Exception , __snake_case : Optional[str] , __snake_case : Optional[List[str]] = None ): '''simple docstring''' UpperCAmelCase_ : Tuple = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): UpperCAmelCase_ : Union[str, Any] = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['python']) _register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow']) _register_formatter(NumpyFormatter, 'numpy', aliases=['np']) _register_formatter(PandasFormatter, 'pandas', aliases=['pd']) _register_formatter(CustomFormatter, 'custom') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch']) else: __UpperCAmelCase = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.') _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, 'tensorflow', aliases=['tf']) else: __UpperCAmelCase = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.') _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, 'jax', aliases=[]) else: __UpperCAmelCase = ValueError('JAX needs to be installed to be able to return JAX arrays.') _register_unavailable_formatter(_jax_error, 'jax', aliases=[]) def lowercase__ ( __snake_case : Optional[str] ): '''simple docstring''' if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def lowercase__ ( __snake_case : Optional[str] , **__snake_case : Optional[int] ): '''simple docstring''' UpperCAmelCase_ : List[str] = get_format_type_from_alias(__snake_case ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**__snake_case ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( F"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
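# A decoupled sketch of the alias-aware formatter registry above, runnable
# without the datasets library; `NumpyLikeFormatter` is an illustrative
# stand-in for a real formatter class.
_formatters = {}
_aliases = {}


def register_formatter(formatter_cls, format_type, aliases=()):
    _formatters[format_type] = formatter_cls
    # every alias (and the canonical name itself) resolves to the format type
    for alias in set(list(aliases) + [format_type]):
        _aliases[alias] = format_type


def get_formatter(format_type, **kwargs):
    format_type = _aliases.get(format_type, format_type)
    if format_type in _formatters:
        return _formatters[format_type](**kwargs)
    raise ValueError(f"Unknown format type: {format_type!r}")


class NumpyLikeFormatter:
    def __init__(self, **kwargs):
        self.kwargs = kwargs


register_formatter(NumpyLikeFormatter, "numpy", aliases=["np"])
print(type(get_formatter("np")).__name__)  # NumpyLikeFormatter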
import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed __UpperCAmelCase = logging.getLogger(__name__) def lowercase__ ( __snake_case : List[Any]=2 , __snake_case : Union[str, Any]=3 , __snake_case : Any=16 , __snake_case : int = 10 , __snake_case : int = 2 ): '''simple docstring''' def get_dataset(__snake_case : Optional[Any] ): UpperCAmelCase_ : Optional[Any] = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(__snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) UpperCAmelCase_ : Any = get_dataset(__snake_case ) UpperCAmelCase_ : str = get_dataset(__snake_case ) UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 ) UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 ) return (train_dataloader, valid_dataloader) def lowercase__ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple=None ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = [] for epoch in range(__snake_case ): # Train quickly model.train() for batch in dataloader: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = batch UpperCAmelCase_ : List[Any] = model(__snake_case ) UpperCAmelCase_ : int = torch.nn.functional.mse_loss(__snake_case , __snake_case ) accelerator.backward(__snake_case ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class lowerCamelCase (nn.Module ): '''simple docstring''' def __init__( self ) -> Optional[Any]: super().__init__() UpperCAmelCase_ : List[Any] = nn.Parameter(torch.randn(1 ) ) UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn(1 ) ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[Any]: return x * self.a + self.b class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ) -> Dict: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Tuple = DummyModel() UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders() UpperCAmelCase_ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCamelCase , automatic_checkpoint_naming=_UpperCamelCase ) # Train baseline UpperCAmelCase_ : Dict = Accelerator(project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def __UpperCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Optional[Any] = DummyModel() UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders() # Train baseline UpperCAmelCase_ : Tuple = Accelerator() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ 
, UpperCAmelCase_ : Optional[Any] = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' ) accelerator.save_state(_UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item() UpperCAmelCase_ : Dict = optimizer.state_dict() UpperCAmelCase_ : Union[str, Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item() UpperCAmelCase_ : Any = optimizer.state_dict() # Train partially set_seed(4_2 ) UpperCAmelCase_ : int = DummyModel() UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders() UpperCAmelCase_ : Optional[Any] = Accelerator() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) accelerator.load_state(_UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item() UpperCAmelCase_ : Optional[Any] = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save everything UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' ) accelerator.save_state(_UpperCamelCase ) # Load everything back in and make sure all states work accelerator.load_state(_UpperCamelCase ) test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item() UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Tuple = DummyModel() UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders() UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase ) # Train baseline UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial accelerator.save_state() ((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item() UpperCAmelCase_ : Optional[int] = optimizer.state_dict() UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item() UpperCAmelCase_ : Optional[int] = optimizer.state_dict() # Train partially set_seed(4_2 ) UpperCAmelCase_ : Any = DummyModel() UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = 
dummy_dataloaders() UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase ) UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item() UpperCAmelCase_ : List[Any] = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) ) test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item() UpperCAmelCase_ : Dict = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> Dict: UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] ) UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] ) UpperCAmelCase_ : Union[str, Any] = DummyModel() UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() ) UpperCAmelCase_ : Any = Accelerator() with self.assertRaises(_UpperCamelCase ) as ve: accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Optional[int] = str(ve.exception ) self.assertTrue('Item at index 0' in message ) self.assertTrue('Item at index 1' in message ) self.assertFalse('Item at index 2' in message ) self.assertFalse('Item at index 3' in message ) def __UpperCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : int = DummyModel() UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders() UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase ) # Train baseline UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial accelerator.save_state() UpperCAmelCase_ : Dict = scheduler.state_dict() train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) self.assertEqual(_UpperCamelCase , scheduler.state_dict() ) def __UpperCAmelCase ( self ) -> Dict: with 
tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Optional[int] = DummyModel() UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 ) # Train baseline UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase ) # Save 3 states: for _ in range(1_1 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) ) @require_cuda def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : List[str] = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() ) if __name__ == "__main__": __UpperCAmelCase = '/tmp/accelerate/state_checkpointing' __UpperCAmelCase = DummyModel() __UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3) __UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9) __UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders() __UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline __UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no') if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: __UpperCAmelCase = group['params'][0].device break assert param_device.type == accelerator.device.type __UpperCAmelCase = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu') for group in optimizer.param_groups: __UpperCAmelCase = group['params'][0].device break assert ( param_device.type == torch.device('cpu').type ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device') for group in optimizer.param_groups: __UpperCAmelCase = group['params'][0].device break assert ( param_device.type == accelerator.device.type ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match='Unsupported optimizer map location passed'): accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid') accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
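# A pure-torch sketch of the save/restore round trip the tests above verify,
# showing the state that actually gets checkpointed (model and optimizer state
# dicts); the single-file layout here is an illustrative assumption, not
# Accelerate's on-disk format.
import os
import tempfile

import torch

model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, "checkpoint.pt")
    torch.save({"model": model.state_dict(), "optimizer": optimizer.state_dict()}, path)
    state = torch.load(path)
    model.load_state_dict(state["model"])
    optimizer.load_state_dict(state["optimizer"])
print("round trip ok")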
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(result) == sorted(expected)
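# A self-contained Kruskal sketch (union-find with path halving) matching the
# [u, v, weight] edge format used by the test above; it is an illustrative
# reimplementation, not the imported `kruskal`.
def kruskal_sketch(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving keeps trees shallow
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # edge joins two components: keep it
            parent[root_u] = root_v
            mst.append([u, v, w])
    return mst


print(kruskal_sketch(4, [[0, 1, 4], [1, 2, 2], [2, 3, 1], [0, 3, 9], [0, 2, 5]]))
# [[2, 3, 1], [1, 2, 2], [0, 1, 4]]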
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
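# A minimal sketch of the deprecate-by-subclassing pattern above: the old class
# name keeps working but emits a warning and otherwise inherits all behavior.
# The class names below are illustrative, not from transformers.
import warnings


class NewImageProcessor:
    def __init__(self, scale=1.0):
        self.scale = scale


class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


extractor = OldFeatureExtractor(scale=2.0)  # warns, but still fully functional
print(extractor.scale)  # 2.0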
import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ): '''simple docstring''' _snake_case : Tuple = IFInpaintingPipeline _snake_case : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} _snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS _snake_case : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''} def __UpperCAmelCase ( self ) -> Dict: return self._get_dummy_components() def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> int: if str(_UpperCamelCase ).startswith('mps' ): UpperCAmelCase_ : List[Any] = torch.manual_seed(_UpperCamelCase ) else: UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) UpperCAmelCase_ : Any = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase ) UpperCAmelCase_ : Any = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase ) UpperCAmelCase_ : List[str] = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __UpperCAmelCase ( self ) -> Any: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __UpperCAmelCase ( self ) -> List[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __UpperCAmelCase ( self ) -> Optional[int]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __UpperCAmelCase ( self ) -> Optional[int]: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __UpperCAmelCase ( self ) -> Dict: self._test_save_load_local() def __UpperCAmelCase ( self ) -> List[str]: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
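# A standalone sketch of the device-aware seeding pattern used by
# `get_dummy_inputs` above: MPS needs a CPU-side generator, while CUDA/CPU take
# one bound to the device. `make_generator` is an illustrative helper.
import torch


def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # MPS cannot host a Generator directly
    return torch.Generator(device=device).manual_seed(seed)


generator = make_generator("cpu", seed=0)
print(torch.rand(2, generator=generator))  # reproducible across runs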
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
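# A minimal node type plus a usage sketch for the three palindrome checks
# above (they only assume nodes with `.val` and `.next`); `Node` and
# `from_list` are illustrative helpers, not part of the original module.
class Node:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt


def from_list(values):
    head = None
    for value in reversed(values):
        head = Node(value, head)
    return head


print(is_palindrome(from_list([1, 2, 2, 1])))     # True
print(is_palindrome_stack(from_list([1, 2, 3])))  # False
print(is_palindrome_dict(from_list([1, 2, 1])))   # True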
from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record __UpperCAmelCase = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n' __UpperCAmelCase = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n' __UpperCAmelCase = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, 
references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n' def lowercase__ ( __snake_case : int , __snake_case : Dict ): '''simple docstring''' return float((preds == labels).mean() ) def lowercase__ ( __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : Union[str, Any]="binary" ): '''simple docstring''' UpperCAmelCase_ : str = simple_accuracy(__snake_case , __snake_case ) UpperCAmelCase_ : Dict = float(fa_score(y_true=__snake_case , y_pred=__snake_case , average=__snake_case ) ) return { "accuracy": acc, "f1": fa, } def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[str] ): '''simple docstring''' UpperCAmelCase_ : Tuple = {} for id_pred, label in zip(__snake_case , __snake_case ): UpperCAmelCase_ : str = F"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}" UpperCAmelCase_ : Tuple = id_pred['prediction'] if question_id in question_map: question_map[question_id].append((pred, label) ) else: UpperCAmelCase_ : List[str] = [(pred, label)] UpperCAmelCase_ , UpperCAmelCase_ : List[str] = [], [] for question, preds_labels in question_map.items(): UpperCAmelCase_ , UpperCAmelCase_ : str = zip(*__snake_case ) UpperCAmelCase_ : List[Any] = fa_score(y_true=__snake_case , y_pred=__snake_case , average='macro' ) fas.append(__snake_case ) UpperCAmelCase_ : int = int(sum(pred == label for pred, label in preds_labels ) == len(__snake_case ) ) ems.append(__snake_case ) UpperCAmelCase_ : int = float(sum(__snake_case ) / len(__snake_case ) ) UpperCAmelCase_ : Optional[int] = sum(__snake_case ) / len(__snake_case ) UpperCAmelCase_ : List[str] = float(fa_score(y_true=__snake_case , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase (datasets.Metric ): '''simple docstring''' def __UpperCAmelCase ( self ) -> Any: if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( 'You should supply a configuration name selected in ' '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , ) def __UpperCAmelCase ( self ) -> List[Any]: if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value('int64' ), "query": datasets.Value('int64' ), }, "prediction_text": datasets.Value('string' ), }, "references": { "idx": { "passage": datasets.Value('int64' ), "query": datasets.Value('int64' ), }, "answers": datasets.Sequence(datasets.Value('string' ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value('int64' ), "paragraph": datasets.Value('int64' ), "question": datasets.Value('int64' ), }, "prediction": datasets.Value('int64' ), }, "references": datasets.Value('int64' ), } else: return { 
"predictions": datasets.Value('int64' ), "references": datasets.Value('int64' ), } def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]: if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(_UpperCamelCase , _UpperCamelCase )} elif self.config_name == "cb": return acc_and_fa(_UpperCamelCase , _UpperCamelCase , fa_avg='macro' ) elif self.config_name == "record": UpperCAmelCase_ : str = [ { 'qas': [ {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]} for ref in references ] } ] UpperCAmelCase_ : Any = {pred['idx']['query']: pred['prediction_text'] for pred in predictions} return evaluate_record(_UpperCamelCase , _UpperCamelCase )[0] elif self.config_name == "multirc": return evaluate_multirc(_UpperCamelCase , _UpperCamelCase ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(_UpperCamelCase , _UpperCamelCase )} else: raise KeyError( 'You should supply a configuration name selected in ' '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'vocab.txt'} __UpperCAmelCase = { 'vocab_file': { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt', } } __UpperCAmelCase = { 'YituTech/conv-bert-base': 512, 'YituTech/conv-bert-medium-small': 512, 'YituTech/conv-bert-small': 512, } __UpperCAmelCase = { 'YituTech/conv-bert-base': {'do_lower_case': True}, 'YituTech/conv-bert-medium-small': {'do_lower_case': True}, 'YituTech/conv-bert-small': {'do_lower_case': True}, } class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Optional[int] = VOCAB_FILES_NAMES _snake_case : int = PRETRAINED_VOCAB_FILES_MAP _snake_case : Dict = PRETRAINED_INIT_CONFIGURATION _snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : Any = ConvBertTokenizer def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict: super().__init__( _UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , ) UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _UpperCamelCase ) != do_lower_case or normalizer_state.get('strip_accents' , _UpperCamelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _UpperCamelCase ) != tokenize_chinese_chars ): UpperCAmelCase_ : Any = getattr(_UpperCamelCase , normalizer_state.pop('type' ) ) UpperCAmelCase_ : str = do_lower_case UpperCAmelCase_ : List[Any] = strip_accents UpperCAmelCase_ : str = tokenize_chinese_chars UpperCAmelCase_ : Tuple = normalizer_class(**_UpperCamelCase ) UpperCAmelCase_ : Any = do_lower_case def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[str]: UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]: UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id] UpperCAmelCase_ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]: UpperCAmelCase_ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase ) return tuple(_UpperCamelCase )
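# A toy illustration of the [CLS]/[SEP] framing and token-type ids produced by
# the two methods above; the ids 101/102 are the conventional BERT-style
# [CLS]/[SEP] ids, used here purely as stand-ins.
cls_id, sep_id = 101, 102
tokens_a = [7, 8, 9]
tokens_b = [4, 5]

input_ids = [cls_id] + tokens_a + [sep_id] + tokens_b + [sep_id]
token_type_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
print(input_ids)       # [101, 7, 8, 9, 102, 4, 5, 102]
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]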
__UpperCAmelCase = { 'Pillow': 'Pillow<10.0.0', 'accelerate': 'accelerate>=0.20.3', 'av': 'av==9.2.0', 'beautifulsoup4': 'beautifulsoup4', 'black': 'black~=23.1', 'codecarbon': 'codecarbon==1.2.0', 'cookiecutter': 'cookiecutter==1.7.3', 'dataclasses': 'dataclasses', 'datasets': 'datasets!=2.5.0', 'decord': 'decord==0.6.0', 'deepspeed': 'deepspeed>=0.9.3', 'diffusers': 'diffusers', 'dill': 'dill<0.3.5', 'evaluate': 'evaluate>=0.2.0', 'fairscale': 'fairscale>0.3', 'faiss-cpu': 'faiss-cpu', 'fastapi': 'fastapi', 'filelock': 'filelock', 'flax': 'flax>=0.4.1,<=0.7.0', 'ftfy': 'ftfy', 'fugashi': 'fugashi>=1.0', 'GitPython': 'GitPython<3.1.19', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0', 'importlib_metadata': 'importlib_metadata', 'ipadic': 'ipadic>=1.0.0,<2.0', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13', 'jaxlib': 'jaxlib>=0.1.65,<=0.4.13', 'jieba': 'jieba', 'kenlm': 'kenlm', 'keras-nlp': 'keras-nlp>=0.3.1', 'librosa': 'librosa', 'nltk': 'nltk', 'natten': 'natten>=0.14.6', 'numpy': 'numpy>=1.17', 'onnxconverter-common': 'onnxconverter-common', 'onnxruntime-tools': 'onnxruntime-tools>=1.4.2', 'onnxruntime': 'onnxruntime>=1.4.0', 'opencv-python': 'opencv-python', 'optuna': 'optuna', 'optax': 'optax>=0.0.8,<=0.1.4', 'packaging': 'packaging>=20.0', 'parameterized': 'parameterized', 'phonemizer': 'phonemizer', 'protobuf': 'protobuf', 'psutil': 'psutil', 'pyyaml': 'pyyaml>=5.1', 'pydantic': 'pydantic<2', 'pytest': 'pytest>=7.2.0', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'python': 'python>=3.8.0', 'ray[tune]': 'ray[tune]', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'rhoknp': 'rhoknp>=1.1.0,<1.3.1', 'rjieba': 'rjieba', 'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1', 'ruff': 'ruff>=0.0.241,<=0.0.259', 'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0', 'sacremoses': 'sacremoses', 'safetensors': 'safetensors>=0.3.1', 'sagemaker': 'sagemaker>=2.31.0', 'scikit-learn': 'scikit-learn', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'sigopt': 'sigopt', 'starlette': 'starlette', 'sudachipy': 'sudachipy>=0.6.6', 'sudachidict_core': 'sudachidict_core>=20220729', 'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14', 'tensorflow': 'tensorflow>=2.6,<2.14', 'tensorflow-text': 'tensorflow-text<2.14', 'tf2onnx': 'tf2onnx', 'timeout-decorator': 'timeout-decorator', 'timm': 'timm', 'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14', 'torch': 'torch>=1.9,!=1.12.0', 'torchaudio': 'torchaudio', 'torchvision': 'torchvision', 'pyctcdecode': 'pyctcdecode>=0.4.0', 'tqdm': 'tqdm>=4.27', 'unidic': 'unidic>=1.0.2', 'unidic_lite': 'unidic_lite>=1.0.7', 'urllib3': 'urllib3<2.0.0', 'uvicorn': 'uvicorn', }
from __future__ import annotations __UpperCAmelCase = list[tuple[int, int]] __UpperCAmelCase = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __UpperCAmelCase = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> List[Any]: UpperCAmelCase_ : str = pos_x UpperCAmelCase_ : str = pos_y UpperCAmelCase_ : int = (pos_y, pos_x) UpperCAmelCase_ : str = goal_x UpperCAmelCase_ : Optional[int] = goal_y UpperCAmelCase_ : Tuple = g_cost UpperCAmelCase_ : str = parent UpperCAmelCase_ : int = self.calculate_heuristic() def __UpperCAmelCase ( self ) -> float: UpperCAmelCase_ : int = abs(self.pos_x - self.goal_x ) UpperCAmelCase_ : Optional[int] = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self , _UpperCamelCase ) -> bool: return self.f_cost < other.f_cost class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]: UpperCAmelCase_ : Optional[int] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _UpperCamelCase ) UpperCAmelCase_ : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , _UpperCamelCase ) UpperCAmelCase_ : Optional[int] = [self.start] UpperCAmelCase_ : list[Node] = [] UpperCAmelCase_ : List[str] = False def __UpperCAmelCase ( self ) -> Path | None: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() UpperCAmelCase_ : List[str] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: UpperCAmelCase_ : Optional[int] = True return self.retrace_path(_UpperCamelCase ) self.closed_nodes.append(_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = self.get_successors(_UpperCamelCase ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(_UpperCamelCase ) else: # retrieve the best current path UpperCAmelCase_ : int = self.open_nodes.pop(self.open_nodes.index(_UpperCamelCase ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(_UpperCamelCase ) else: self.open_nodes.append(_UpperCamelCase ) if not self.reached: return [self.start.pos] return None def __UpperCAmelCase ( self , _UpperCamelCase ) -> list[Node]: UpperCAmelCase_ : int = [] for action in delta: UpperCAmelCase_ : str = parent.pos_x + action[1] UpperCAmelCase_ : Dict = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_UpperCamelCase ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( _UpperCamelCase , _UpperCamelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _UpperCamelCase , ) ) return successors def __UpperCAmelCase ( self , _UpperCamelCase ) -> Path: UpperCAmelCase_ : Any = node UpperCAmelCase_ : Tuple = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCAmelCase_ : Dict = current_node.parent path.reverse() return path if __name__ == "__main__": __UpperCAmelCase = (0, 0) __UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print('------') __UpperCAmelCase = GreedyBestFirst(init, goal) __UpperCAmelCase = greedy_bf.search() if path: for pos_x, pos_y in path: __UpperCAmelCase = 2 for elem in grid: print(elem)
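# A quick check of the Manhattan-distance heuristic the greedy search above
# minimizes (`calculate_heuristic` returns `dx + dy`); `manhattan` is an
# illustrative standalone version.
def manhattan(pos, goal):
    return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])


print(manhattan((0, 0), (6, 6)))  # 12: the start node's heuristic on the 7x7 grid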
29
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : "DiagonalGaussianDistribution" class lowerCamelCase (_snake_case , _snake_case ): '''simple docstring''' _snake_case : Optional[int] = True @register_to_config def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]: super().__init__() # pass init params to Encoder UpperCAmelCase_ : List[str] = Encoder( in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , ) # pass init params to Decoder UpperCAmelCase_ : Dict = Decoder( in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , ) UpperCAmelCase_ : Any = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) UpperCAmelCase_ : List[Any] = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 ) UpperCAmelCase_ : Any = False UpperCAmelCase_ : int = False # only relevant if vae tiling is enabled UpperCAmelCase_ : Optional[int] = self.config.sample_size UpperCAmelCase_ : int = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) UpperCAmelCase_ : Optional[Any] = 0.25 def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]: if isinstance(_UpperCamelCase , (Encoder, Decoder) ): UpperCAmelCase_ : Union[str, Any] = value def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int: UpperCAmelCase_ : Tuple = use_tiling def __UpperCAmelCase ( self ) -> Dict: self.enable_tiling(_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ : str = True def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : Optional[int] = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]: UpperCAmelCase_ : Optional[int] = {} def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if hasattr(_UpperCamelCase , 'set_processor' ): UpperCAmelCase_ : Optional[int] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) return processors def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = 
len(self.attn_processors.keys() ) if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if hasattr(_UpperCamelCase , 'set_processor' ): if not isinstance(_UpperCamelCase , _UpperCamelCase ): module.set_processor(_UpperCamelCase ) else: module.set_processor(processor.pop(f"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> Union[str, Any]: self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput: if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase ) if self.use_slicing and x.shape[0] > 1: UpperCAmelCase_ : Union[str, Any] = [self.encoder(_UpperCamelCase ) for x_slice in x.split(1 )] UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase ) else: UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase ) UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase ) UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase ) UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase ) @apply_forward_hook def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: if self.use_slicing and z.shape[0] > 1: UpperCAmelCase_ : List[str] = [self._decode(_UpperCamelCase ).sample for z_slice in z.split(1 )] UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase ) else: UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any: UpperCAmelCase_ : Tuple = min(a.shape[2] , b.shape[2] , _UpperCamelCase ) for y in range(_UpperCamelCase ): UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict: UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , _UpperCamelCase ) for x in range(_UpperCamelCase ): UpperCAmelCase_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput: UpperCAmelCase_ : Any = 
int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor ) UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. UpperCAmelCase_ : List[str] = [] for i in range(0 , x.shape[2] , _UpperCamelCase ): UpperCAmelCase_ : Any = [] for j in range(0 , x.shape[3] , _UpperCamelCase ): UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase ) UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase ) row.append(_UpperCamelCase ) rows.append(_UpperCamelCase ) UpperCAmelCase_ : str = [] for i, row in enumerate(_UpperCamelCase ): UpperCAmelCase_ : List[Any] = [] for j, tile in enumerate(_UpperCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase ) if j > 0: UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) ) UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 ) UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor ) UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. 
UpperCAmelCase_ : Union[str, Any] = [] for i in range(0 , z.shape[2] , _UpperCamelCase ): UpperCAmelCase_ : List[str] = [] for j in range(0 , z.shape[3] , _UpperCamelCase ): UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase ) UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase ) row.append(_UpperCamelCase ) rows.append(_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = [] for i, row in enumerate(_UpperCamelCase ): UpperCAmelCase_ : List[Any] = [] for j, tile in enumerate(_UpperCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase ) if j > 0: UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) ) UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]: UpperCAmelCase_ : Optional[Any] = sample UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist if sample_posterior: UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase ) else: UpperCAmelCase_ : int = posterior.mode() UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase )
29
1
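The blend_h/blend_v helpers in the autoencoder sample above implement a linear cross-fade between neighbouring tiles. A self-contained sketch of the horizontal case, with the same arithmetic as the sample but as a standalone function, assuming NCHW tensors:

import torch


def blend_h(a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
    # Cross-fade the right edge of tile `a` into the left edge of tile `b`
    # over `blend_extent` columns; `b` is modified in place and returned.
    blend_extent = min(a.shape[3], b.shape[3], blend_extent)
    for x in range(blend_extent):
        b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[
            :, :, :, x
        ] * (x / blend_extent)
    return b


left, right = torch.zeros(1, 3, 8, 8), torch.ones(1, 3, 8, 8)
blend_h(left, right, 4)  # columns 0..3 of `right` become 0.0, 0.25, 0.5, 0.75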
from string import ascii_uppercase

dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    # Repeat the key cyclically until it is as long as the message
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    cipher = ''
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher += dict2[x]
    return cipher


def original_text(cipher: str, key_new: str) -> str:
    or_txt = ''
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = 'THE GERMAN ATTACK'
    key = 'SECRET'
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
29
def lowercase__ ( __snake_case : int , __snake_case : int ): '''simple docstring''' if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive' ) UpperCAmelCase_ : Tuple = str(bin(__snake_case ) )[2:] # remove the leading "0b" UpperCAmelCase_ : Union[str, Any] = str(bin(__snake_case ) )[2:] # remove the leading "0b" UpperCAmelCase_ : List[Any] = max(len(__snake_case ) , len(__snake_case ) ) return "0b" + "".join( str(int(char_a == '1' and char_b == '1' ) ) for char_a, char_b in zip(a_binary.zfill(__snake_case ) , b_binary.zfill(__snake_case ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
29
1
from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json', } class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Tuple = '''autoformer''' _snake_case : Tuple = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = "student_t" , _UpperCamelCase = "nll" , _UpperCamelCase = 1 , _UpperCamelCase = [1, 2, 3, 4, 5, 6, 7] , _UpperCamelCase = True , _UpperCamelCase = 0 , _UpperCamelCase = 0 , _UpperCamelCase = 0 , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 6_4 , _UpperCamelCase = 2 , _UpperCamelCase = 2 , _UpperCamelCase = 2 , _UpperCamelCase = 2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = "gelu" , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 0.02 , _UpperCamelCase = True , _UpperCamelCase=True , _UpperCamelCase = 1_0 , _UpperCamelCase = 2_5 , _UpperCamelCase = 3 , **_UpperCamelCase , ) -> Union[str, Any]: # time series specific configuration UpperCAmelCase_ : str = prediction_length UpperCAmelCase_ : str = context_length if context_length is not None else prediction_length UpperCAmelCase_ : Union[str, Any] = distribution_output UpperCAmelCase_ : List[Any] = loss UpperCAmelCase_ : Any = input_size UpperCAmelCase_ : List[str] = num_time_features UpperCAmelCase_ : Optional[Any] = lags_sequence UpperCAmelCase_ : Optional[int] = scaling UpperCAmelCase_ : Union[str, Any] = num_dynamic_real_features UpperCAmelCase_ : Union[str, Any] = num_static_real_features UpperCAmelCase_ : int = num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(_UpperCamelCase ) != num_static_categorical_features: raise ValueError( 'The cardinality should be a list of the same length as `num_static_categorical_features`' ) UpperCAmelCase_ : Union[str, Any] = cardinality else: UpperCAmelCase_ : int = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(_UpperCamelCase ) != num_static_categorical_features: raise ValueError( 'The embedding dimension should be a list of the same length as `num_static_categorical_features`' ) UpperCAmelCase_ : Tuple = embedding_dimension else: UpperCAmelCase_ : List[str] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality] UpperCAmelCase_ : Optional[Any] = num_parallel_samples # Transformer architecture configuration UpperCAmelCase_ : List[str] = input_size * len(self.lags_sequence ) + self._number_of_features UpperCAmelCase_ : List[Any] = d_model UpperCAmelCase_ : List[str] = encoder_attention_heads UpperCAmelCase_ : List[Any] = decoder_attention_heads UpperCAmelCase_ : Optional[int] = encoder_ffn_dim UpperCAmelCase_ : Dict = decoder_ffn_dim UpperCAmelCase_ : str = encoder_layers UpperCAmelCase_ : Union[str, Any] = decoder_layers UpperCAmelCase_ : Union[str, Any] = dropout UpperCAmelCase_ : int = attention_dropout UpperCAmelCase_ : Optional[Any] = activation_dropout UpperCAmelCase_ : List[Any] = encoder_layerdrop UpperCAmelCase_ : Union[str, Any] = decoder_layerdrop 
UpperCAmelCase_ : Union[str, Any] = activation_function UpperCAmelCase_ : Union[str, Any] = init_std UpperCAmelCase_ : Optional[int] = use_cache # Autoformer UpperCAmelCase_ : int = label_length UpperCAmelCase_ : List[Any] = moving_average UpperCAmelCase_ : Optional[int] = autocorrelation_factor super().__init__(is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase ) @property def __UpperCAmelCase ( self ) -> int: return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
29
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'vocab.txt'} __UpperCAmelCase = { 'vocab_file': { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt', } } __UpperCAmelCase = { 'YituTech/conv-bert-base': 512, 'YituTech/conv-bert-medium-small': 512, 'YituTech/conv-bert-small': 512, } __UpperCAmelCase = { 'YituTech/conv-bert-base': {'do_lower_case': True}, 'YituTech/conv-bert-medium-small': {'do_lower_case': True}, 'YituTech/conv-bert-small': {'do_lower_case': True}, } class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Optional[int] = VOCAB_FILES_NAMES _snake_case : int = PRETRAINED_VOCAB_FILES_MAP _snake_case : Dict = PRETRAINED_INIT_CONFIGURATION _snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : Any = ConvBertTokenizer def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict: super().__init__( _UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , ) UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _UpperCamelCase ) != do_lower_case or normalizer_state.get('strip_accents' , _UpperCamelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _UpperCamelCase ) != tokenize_chinese_chars ): UpperCAmelCase_ : Any = getattr(_UpperCamelCase , normalizer_state.pop('type' ) ) UpperCAmelCase_ : str = do_lower_case UpperCAmelCase_ : List[Any] = strip_accents UpperCAmelCase_ : str = tokenize_chinese_chars UpperCAmelCase_ : Tuple = normalizer_class(**_UpperCamelCase ) UpperCAmelCase_ : Any = do_lower_case def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[str]: UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]: UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id] UpperCAmelCase_ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]: UpperCAmelCase_ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase ) return tuple(_UpperCamelCase )
29
1
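One rule worth isolating from the Autoformer configuration above is the default embedding size for static categorical features; pulled out on its own, it reads:

def default_embedding_dims(cardinality: list[int]) -> list[int]:
    # Half the number of categories, rounded up, capped at 50 (as in the config above).
    return [min(50, (cat + 1) // 2) for cat in cardinality]


print(default_embedding_dims([4, 31, 1000]))  # [2, 16, 50]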
def max_product_subarray(numbers: list[int]) -> int:
    # Returns the maximum product obtainable from a contiguous subarray of `numbers`.
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError('numbers must be an iterable of integers')

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
29
from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'snap-research/efficientformer-l1-300': ( 'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json' ), } class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Optional[int] = '''efficientformer''' def __init__( self , _UpperCamelCase = [3, 2, 6, 4] , _UpperCamelCase = [4_8, 9_6, 2_2_4, 4_4_8] , _UpperCamelCase = [True, True, True, True] , _UpperCamelCase = 4_4_8 , _UpperCamelCase = 3_2 , _UpperCamelCase = 4 , _UpperCamelCase = 7 , _UpperCamelCase = 5 , _UpperCamelCase = 8 , _UpperCamelCase = 4 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1_6 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 2 , _UpperCamelCase = 1 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1 , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1E-5 , _UpperCamelCase = "gelu" , _UpperCamelCase = 0.02 , _UpperCamelCase = 1E-12 , _UpperCamelCase = 2_2_4 , _UpperCamelCase = 1E-05 , **_UpperCamelCase , ) -> None: super().__init__(**_UpperCamelCase ) UpperCAmelCase_ : int = hidden_act UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase_ : Tuple = hidden_sizes UpperCAmelCase_ : Union[str, Any] = num_hidden_layers UpperCAmelCase_ : List[str] = num_attention_heads UpperCAmelCase_ : List[Any] = initializer_range UpperCAmelCase_ : int = layer_norm_eps UpperCAmelCase_ : List[str] = patch_size UpperCAmelCase_ : Union[str, Any] = num_channels UpperCAmelCase_ : Optional[Any] = depths UpperCAmelCase_ : List[Any] = mlp_expansion_ratio UpperCAmelCase_ : List[str] = downsamples UpperCAmelCase_ : List[Any] = dim UpperCAmelCase_ : Tuple = key_dim UpperCAmelCase_ : Optional[int] = attention_ratio UpperCAmelCase_ : str = resolution UpperCAmelCase_ : Dict = pool_size UpperCAmelCase_ : Union[str, Any] = downsample_patch_size UpperCAmelCase_ : List[str] = downsample_stride UpperCAmelCase_ : List[str] = downsample_pad UpperCAmelCase_ : Any = drop_path_rate UpperCAmelCase_ : Dict = num_metaad_blocks UpperCAmelCase_ : Dict = distillation UpperCAmelCase_ : int = use_layer_scale UpperCAmelCase_ : Any = layer_scale_init_value UpperCAmelCase_ : Any = image_size UpperCAmelCase_ : Dict = batch_norm_eps
29
1
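A quick sanity check for the max_product_subarray function fixed above, with expected values worked out by hand:

assert max_product_subarray([2, 3, -2, 4]) == 6   # subarray [2, 3]
assert max_product_subarray([-2, 0, -1]) == 0     # subarray [0]
assert max_product_subarray([-2, -3, 4]) == 24    # subarray [-2, -3, 4]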
def check_cycle(graph: dict) -> bool:
    # Keep track of all visited nodes
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from the recursion stack before the function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
29
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n' @dataclass class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Union[PIL.Image.Image, np.ndarray] class lowerCamelCase (_snake_case ): '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any: super().__init__() self.register_modules( prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]: if latents is None: UpperCAmelCase_ : str = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase ) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" ) UpperCAmelCase_ : Tuple = latents.to(_UpperCamelCase ) UpperCAmelCase_ : Tuple = latents * scheduler.init_noise_sigma return latents def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) UpperCAmelCase_ : int = torch.device(f"cuda:{gpu_id}" ) UpperCAmelCase_ : int = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_UpperCamelCase , _UpperCamelCase ) @property def __UpperCAmelCase ( self ) -> int: if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_UpperCamelCase , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> str: if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ): UpperCAmelCase_ : int = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else 
torch.stack(_UpperCamelCase , axis=0 ) if not isinstance(_UpperCamelCase , torch.Tensor ): UpperCAmelCase_ : Optional[int] = self.image_processor(_UpperCamelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 ) UpperCAmelCase_ : Tuple = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = self.image_encoder(_UpperCamelCase )['last_hidden_state'] UpperCAmelCase_ : Union[str, Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 UpperCAmelCase_ : List[str] = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase_ : Dict = torch.zeros_like(_UpperCamelCase ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase_ : Optional[int] = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_UpperCamelCase ) def __call__( self , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = 2_5 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 4.0 , _UpperCamelCase = 6_4 , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> Union[str, Any]: if isinstance(_UpperCamelCase , PIL.Image.Image ): UpperCAmelCase_ : Tuple = 1 elif isinstance(_UpperCamelCase , torch.Tensor ): UpperCAmelCase_ : str = image.shape[0] elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): UpperCAmelCase_ : Optional[int] = len(_UpperCamelCase ) else: raise ValueError( f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}" ) UpperCAmelCase_ : Tuple = self._execution_device UpperCAmelCase_ : str = batch_size * num_images_per_prompt UpperCAmelCase_ : str = guidance_scale > 1.0 UpperCAmelCase_ : str = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # prior self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase ) UpperCAmelCase_ : int = self.scheduler.timesteps UpperCAmelCase_ : int = self.prior.config.num_embeddings UpperCAmelCase_ : Any = self.prior.config.embedding_dim UpperCAmelCase_ : List[str] = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim UpperCAmelCase_ : List[Any] = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase ) for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase_ : Optional[Any] = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : int = self.prior( _UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding # remove the variance UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 ) UpperCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred 
- noise_pred_uncond) UpperCAmelCase_ : List[str] = self.scheduler.step( _UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=_UpperCamelCase ) UpperCAmelCase_ : List[Any] = [] for i, latent in enumerate(_UpperCamelCase ): print() UpperCAmelCase_ : List[str] = self.renderer.decode( latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , ) images.append(_UpperCamelCase ) UpperCAmelCase_ : Optional[int] = torch.stack(_UpperCamelCase ) if output_type not in ["np", "pil"]: raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" ) UpperCAmelCase_ : Dict = images.cpu().numpy() if output_type == "pil": UpperCAmelCase_ : List[str] = [self.numpy_to_pil(_UpperCamelCase ) for image in images] # Offload last model to CPU if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_UpperCamelCase )
29
1
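The guidance arithmetic buried in the Shap-E __call__ above is easier to read in isolation. Note that the sample guards it with "if do_classifier_free_guidance is not None:", which is always true for a boolean; the plain truth test is presumably what was intended. A minimal sketch, with the batch layout the pipeline uses (unconditional first, conditional second):

import torch


def apply_classifier_free_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # Split the doubled batch, then push the prediction away from the
    # unconditional branch by `guidance_scale`.
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)


pred = torch.randn(2, 4, 16)
guided = apply_classifier_free_guidance(pred, 4.0)  # shape (1, 4, 16)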
import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def lowercase__ ( __snake_case : Tuple , __snake_case : Tuple=None ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = None if token is not None: UpperCAmelCase_ : Tuple = {'Accept': 'application/vnd.github+json', 'Authorization': F"Bearer {token}"} UpperCAmelCase_ : List[str] = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" UpperCAmelCase_ : Any = requests.get(__snake_case , headers=__snake_case ).json() UpperCAmelCase_ : List[str] = {} try: job_links.update({job['name']: job['html_url'] for job in result['jobs']} ) UpperCAmelCase_ : Any = math.ceil((result['total_count'] - 100) / 100 ) for i in range(__snake_case ): UpperCAmelCase_ : Union[str, Any] = requests.get(url + F"&page={i + 2}" , headers=__snake_case ).json() job_links.update({job['name']: job['html_url'] for job in result['jobs']} ) return job_links except Exception: print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} def lowercase__ ( __snake_case : Optional[int] , __snake_case : Tuple=None ): '''simple docstring''' UpperCAmelCase_ : Dict = None if token is not None: UpperCAmelCase_ : List[Any] = {'Accept': 'application/vnd.github+json', 'Authorization': F"Bearer {token}"} UpperCAmelCase_ : Dict = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100" UpperCAmelCase_ : int = requests.get(__snake_case , headers=__snake_case ).json() UpperCAmelCase_ : Optional[int] = {} try: artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} ) UpperCAmelCase_ : Optional[Any] = math.ceil((result['total_count'] - 100) / 100 ) for i in range(__snake_case ): UpperCAmelCase_ : Dict = requests.get(url + F"&page={i + 2}" , headers=__snake_case ).json() artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} ) return artifacts except Exception: print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} def lowercase__ ( __snake_case : str , __snake_case : str , __snake_case : int , __snake_case : Dict ): '''simple docstring''' UpperCAmelCase_ : Any = None if token is not None: UpperCAmelCase_ : Any = {'Accept': 'application/vnd.github+json', 'Authorization': F"Bearer {token}"} UpperCAmelCase_ : Any = requests.get(__snake_case , headers=__snake_case , allow_redirects=__snake_case ) UpperCAmelCase_ : List[Any] = result.headers['Location'] UpperCAmelCase_ : str = requests.get(__snake_case , allow_redirects=__snake_case ) UpperCAmelCase_ : str = os.path.join(__snake_case , F"{artifact_name}.zip" ) with open(__snake_case , 'wb' ) as fp: fp.write(response.content ) def lowercase__ ( __snake_case : Any , __snake_case : Any=None ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = [] UpperCAmelCase_ : List[Any] = [] UpperCAmelCase_ : List[str] = None with zipfile.ZipFile(__snake_case ) as z: for filename in z.namelist(): if not os.path.isdir(__snake_case ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(__snake_case ) as f: for line in f: UpperCAmelCase_ : List[str] = line.decode('UTF-8' ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs UpperCAmelCase_ : Optional[Any] = line[: line.index(': ' )] UpperCAmelCase_ : Dict = 
line[line.index(': ' ) + len(': ' ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith('FAILED ' ): # `test` is the test method that failed UpperCAmelCase_ : List[Any] = line[len('FAILED ' ) :] failed_tests.append(__snake_case ) elif filename == "job_name.txt": UpperCAmelCase_ : Optional[Any] = line if len(__snake_case ) != len(__snake_case ): raise ValueError( F"`errors` and `failed_tests` should have the same number of elements. Got {len(__snake_case )} for `errors` " F"and {len(__snake_case )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some" ' problem.' ) UpperCAmelCase_ : List[str] = None if job_name and job_links: UpperCAmelCase_ : Optional[int] = job_links.get(__snake_case , __snake_case ) # A list with elements of the form (line of error, error, failed test) UpperCAmelCase_ : Union[str, Any] = [x + [y] + [job_link] for x, y in zip(__snake_case , __snake_case )] return result def lowercase__ ( __snake_case : str , __snake_case : Union[str, Any]=None ): '''simple docstring''' UpperCAmelCase_ : List[str] = [] UpperCAmelCase_ : int = [os.path.join(__snake_case , __snake_case ) for p in os.listdir(__snake_case ) if p.endswith('.zip' )] for p in paths: errors.extend(get_errors_from_single_artifact(__snake_case , job_links=__snake_case ) ) return errors def lowercase__ ( __snake_case : int , __snake_case : str=None ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = Counter() counter.update([x[1] for x in logs] ) UpperCAmelCase_ : List[str] = counter.most_common() UpperCAmelCase_ : int = {} for error, count in counts: if error_filter is None or error not in error_filter: UpperCAmelCase_ : List[str] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]} UpperCAmelCase_ : Union[str, Any] = dict(sorted(r.items() , key=lambda __snake_case : item[1]["count"] , reverse=__snake_case ) ) return r def lowercase__ ( __snake_case : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ : Tuple = test.split('::' )[0] if test.startswith('tests/models/' ): UpperCAmelCase_ : Optional[int] = test.split('/' )[2] else: UpperCAmelCase_ : Tuple = None return test def lowercase__ ( __snake_case : Optional[Any] , __snake_case : int=None ): '''simple docstring''' UpperCAmelCase_ : Any = [(x[0], x[1], get_model(x[2] )) for x in logs] UpperCAmelCase_ : Dict = [x for x in logs if x[2] is not None] UpperCAmelCase_ : List[str] = {x[2] for x in logs} UpperCAmelCase_ : int = {} for test in tests: UpperCAmelCase_ : int = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) UpperCAmelCase_ : Optional[Any] = counter.most_common() UpperCAmelCase_ : Union[str, Any] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} UpperCAmelCase_ : Union[str, Any] = sum(error_counts.values() ) if n_errors > 0: UpperCAmelCase_ : List[Any] = {'count': n_errors, 'errors': error_counts} UpperCAmelCase_ : List[str] = dict(sorted(r.items() , key=lambda __snake_case : item[1]["count"] , reverse=__snake_case ) ) return r def lowercase__ ( __snake_case : Dict ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = '| no. 
| error | status |' UpperCAmelCase_ : str = '|-:|:-|:-|' UpperCAmelCase_ : Optional[int] = [header, sep] for error in reduced_by_error: UpperCAmelCase_ : Union[str, Any] = reduced_by_error[error]['count'] UpperCAmelCase_ : Optional[Any] = F"| {count} | {error[:100]} | |" lines.append(__snake_case ) return "\n".join(__snake_case ) def lowercase__ ( __snake_case : str ): '''simple docstring''' UpperCAmelCase_ : Dict = '| model | no. of errors | major error | count |' UpperCAmelCase_ : Union[str, Any] = '|-:|-:|-:|-:|' UpperCAmelCase_ : Any = [header, sep] for model in reduced_by_model: UpperCAmelCase_ : Tuple = reduced_by_model[model]['count'] UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = list(reduced_by_model[model]['errors'].items() )[0] UpperCAmelCase_ : Any = F"| {model} | {count} | {error[:60]} | {_count} |" lines.append(__snake_case ) return "\n".join(__snake_case ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') parser.add_argument( '--output_dir', type=str, required=True, help='Where to store the downloaded artifacts and other result files.', ) parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.') __UpperCAmelCase = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) __UpperCAmelCase = get_job_links(args.workflow_run_id, token=args.token) __UpperCAmelCase = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: __UpperCAmelCase = k.find(' / ') __UpperCAmelCase = k[index + len(' / ') :] __UpperCAmelCase = v with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) __UpperCAmelCase = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) __UpperCAmelCase = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error __UpperCAmelCase = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors __UpperCAmelCase = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) __UpperCAmelCase = reduce_by_error(errors) __UpperCAmelCase = reduce_by_model(errors) __UpperCAmelCase = make_github_table(reduced_by_error) __UpperCAmelCase = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa) with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa)
29
import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ): '''simple docstring''' _snake_case : Union[str, Any] = IFImgaImgSuperResolutionPipeline _snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''} _snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} ) _snake_case : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''} def __UpperCAmelCase ( self ) -> Optional[Any]: return self._get_superresolution_dummy_components() def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Any: if str(_UpperCamelCase ).startswith('mps' ): UpperCAmelCase_ : List[Any] = torch.manual_seed(_UpperCamelCase ) else: UpperCAmelCase_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase ) UpperCAmelCase_ : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase ) UpperCAmelCase_ : Tuple = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'original_image': original_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __UpperCAmelCase ( self ) -> Any: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __UpperCAmelCase ( self ) -> Dict: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __UpperCAmelCase ( self ) -> str: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __UpperCAmelCase ( self ) -> List[Any]: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __UpperCAmelCase ( self ) -> Union[str, Any]: self._test_save_load_local() def __UpperCAmelCase ( self ) -> Dict: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
29
1
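The reduction helpers in the CI-statistics script above all pivot on collections.Counter; stripped of the GitHub plumbing, the core aggregation is just the following (the logs rows are made-up stand-ins for the tuples the script assembles):

from collections import Counter

# (error_line, error, failed_test, job_link) tuples, as assembled by the script above
logs = [
    ("tests/a.py:10", "AssertionError", "test_a", None),
    ("tests/b.py:20", "AssertionError", "test_b", None),
    ("tests/c.py:30", "TimeoutError", "test_c", None),
]

counter = Counter(x[1] for x in logs)
for error, count in counter.most_common():
    print(f"{count:>3}  {error}")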
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    # Logistic sigmoid; with deriv=True, its derivative given a sigmoid output
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random starting weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    layer_1 = 0.0
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
29
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase = { 'configuration_time_series_transformer': [ 'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimeSeriesTransformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TimeSeriesTransformerForPrediction', 'TimeSeriesTransformerModel', 'TimeSeriesTransformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
29
1
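The _LazyModule used in the __init__ sample above defers heavy imports until an attribute is first touched. A minimal sketch of the same idea with module-level __getattr__ (PEP 562); the real helper additionally handles submodules and dir(), and the toy _import_structure here is a stand-in:

import importlib

_import_structure = {"math": ["sqrt"]}  # toy stand-in for the real mapping
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # Import the owning module lazily, on first attribute access.
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")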
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCAmelCase = { 'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'], 'tokenization_deberta': ['DebertaTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['DebertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'DebertaForMaskedLM', 'DebertaForQuestionAnswering', 'DebertaForSequenceClassification', 'DebertaForTokenClassification', 'DebertaModel', 'DebertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDebertaForMaskedLM', 'TFDebertaForQuestionAnswering', 'TFDebertaForSequenceClassification', 'TFDebertaForTokenClassification', 'TFDebertaModel', 'TFDebertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
29
import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'tensor(bool)': np.bool_, 'tensor(int8)': np.inta, 'tensor(uint8)': np.uinta, 'tensor(int16)': np.intaa, 'tensor(uint16)': np.uintaa, 'tensor(int32)': np.intaa, 'tensor(uint32)': np.uintaa, 'tensor(int64)': np.intaa, 'tensor(uint64)': np.uintaa, 'tensor(float16)': np.floataa, 'tensor(float)': np.floataa, 'tensor(double)': np.floataa, } class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase=None , **_UpperCamelCase ) -> Dict: logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' ) UpperCAmelCase_ : Any = model UpperCAmelCase_ : int = kwargs.get('model_save_dir' , _UpperCamelCase ) UpperCAmelCase_ : List[Any] = kwargs.get('latest_model_name' , _UpperCamelCase ) def __call__( self , **_UpperCamelCase ) -> str: UpperCAmelCase_ : Optional[int] = {k: np.array(_UpperCamelCase ) for k, v in kwargs.items()} return self.model.run(_UpperCamelCase , _UpperCamelCase ) @staticmethod def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ) -> List[Any]: if provider is None: logger.info('No onnxruntime provider specified, using CPUExecutionProvider' ) UpperCAmelCase_ : List[str] = 'CPUExecutionProvider' return ort.InferenceSession(_UpperCamelCase , providers=[provider] , sess_options=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> Dict: UpperCAmelCase_ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(self.latest_model_name ) UpperCAmelCase_ : str = Path(_UpperCamelCase ).joinpath(_UpperCamelCase ) try: shutil.copyfile(_UpperCamelCase , _UpperCamelCase ) except shutil.SameFileError: pass # copy external weights (for models >2GB) UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(_UpperCamelCase ) if src_path.exists(): UpperCAmelCase_ : List[Any] = Path(_UpperCamelCase ).joinpath(_UpperCamelCase ) try: shutil.copyfile(_UpperCamelCase , _UpperCamelCase ) except shutil.SameFileError: pass def __UpperCAmelCase ( self , _UpperCamelCase , **_UpperCamelCase , ) -> List[str]: if os.path.isfile(_UpperCamelCase ): logger.error(f"Provided path ({save_directory}) should be a directory, not a file" ) return os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase ) # saving model weights/files self._save_pretrained(_UpperCamelCase , **_UpperCamelCase ) @classmethod def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> List[str]: UpperCAmelCase_ : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(_UpperCamelCase ): UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model( os.path.join(_UpperCamelCase , _UpperCamelCase ) , provider=_UpperCamelCase , sess_options=_UpperCamelCase ) UpperCAmelCase_ : Tuple = Path(_UpperCamelCase ) # load model from hub else: # download model UpperCAmelCase_ : List[str] = hf_hub_download( repo_id=_UpperCamelCase , 
filename=_UpperCamelCase , use_auth_token=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , ) UpperCAmelCase_ : Union[str, Any] = Path(_UpperCamelCase ).parent UpperCAmelCase_ : List[str] = Path(_UpperCamelCase ).name UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(_UpperCamelCase , provider=_UpperCamelCase , sess_options=_UpperCamelCase ) return cls(model=_UpperCamelCase , **_UpperCamelCase ) @classmethod def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> Optional[int]: UpperCAmelCase_ : List[str] = None if len(str(_UpperCamelCase ).split('@' ) ) == 2: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model_id.split('@' ) return cls._from_pretrained( model_id=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , use_auth_token=_UpperCamelCase , **_UpperCamelCase , )
29
1
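The dtype table at the top of the OnnxRuntimeModel sample above is mangled: np.inta, np.floataa, and friends are not real NumPy names. The mapping presumably intended, matching the ONNX Runtime tensor type strings, is:

import numpy as np

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}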
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json', # See all SEW models at https://huggingface.co/models?filter=sew } class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Any = '''sew''' def __init__( self , _UpperCamelCase=3_2 , _UpperCamelCase=7_6_8 , _UpperCamelCase=1_2 , _UpperCamelCase=1_2 , _UpperCamelCase=3_0_7_2 , _UpperCamelCase=2 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.0 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.02 , _UpperCamelCase=1E-5 , _UpperCamelCase="group" , _UpperCamelCase="gelu" , _UpperCamelCase=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _UpperCamelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _UpperCamelCase=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _UpperCamelCase=False , _UpperCamelCase=1_2_8 , _UpperCamelCase=1_6 , _UpperCamelCase=True , _UpperCamelCase=0.05 , _UpperCamelCase=1_0 , _UpperCamelCase=2 , _UpperCamelCase=0.0 , _UpperCamelCase=1_0 , _UpperCamelCase=0 , _UpperCamelCase="mean" , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=2_5_6 , _UpperCamelCase=0 , _UpperCamelCase=1 , _UpperCamelCase=2 , **_UpperCamelCase , ) -> Optional[Any]: super().__init__(**_UpperCamelCase , pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase ) UpperCAmelCase_ : str = hidden_size UpperCAmelCase_ : str = feat_extract_norm UpperCAmelCase_ : Union[str, Any] = feat_extract_activation UpperCAmelCase_ : str = list(_UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = list(_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = list(_UpperCamelCase ) UpperCAmelCase_ : Tuple = conv_bias UpperCAmelCase_ : Dict = num_conv_pos_embeddings UpperCAmelCase_ : Optional[int] = num_conv_pos_embedding_groups UpperCAmelCase_ : Tuple = len(self.conv_dim ) UpperCAmelCase_ : int = num_hidden_layers UpperCAmelCase_ : int = intermediate_size UpperCAmelCase_ : Tuple = squeeze_factor UpperCAmelCase_ : Optional[Any] = hidden_act UpperCAmelCase_ : Union[str, Any] = num_attention_heads UpperCAmelCase_ : Tuple = hidden_dropout UpperCAmelCase_ : Dict = attention_dropout UpperCAmelCase_ : str = activation_dropout UpperCAmelCase_ : Optional[Any] = feat_proj_dropout UpperCAmelCase_ : Any = final_dropout UpperCAmelCase_ : Optional[int] = layerdrop UpperCAmelCase_ : List[Any] = layer_norm_eps UpperCAmelCase_ : Tuple = initializer_range UpperCAmelCase_ : Union[str, Any] = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' f"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)" f"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase_ : Optional[Any] = apply_spec_augment UpperCAmelCase_ : Optional[Any] = mask_time_prob UpperCAmelCase_ : Any = mask_time_length UpperCAmelCase_ : Union[str, Any] = mask_time_min_masks UpperCAmelCase_ : List[str] = mask_feature_prob UpperCAmelCase_ : Dict = mask_feature_length UpperCAmelCase_ : List[str] = mask_feature_min_masks # ctc loss UpperCAmelCase_ : str = ctc_loss_reduction UpperCAmelCase_ : int = ctc_zero_infinity # sequence classification UpperCAmelCase_ : List[Any] = use_weighted_layer_sum UpperCAmelCase_ : Optional[Any] = classifier_proj_size @property def __UpperCAmelCase ( self ) -> Dict: return functools.reduce(operator.mul , self.conv_stride , 1 )
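# A small sketch of what the property above computes: the product of the conv
# strides is the feature extractor's overall downsampling factor. The stride
# values are taken from the default `conv_stride` of this config:
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(functools.reduce(operator.mul , conv_stride , 1 ))  # 5 * 2**6 = 320 input samples per output frame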
29
import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope='session' ) def lowercase__ ( ): '''simple docstring''' UpperCAmelCase_ : Tuple = 10 UpperCAmelCase_ : Tuple = datasets.Features( { 'tokens': datasets.Sequence(datasets.Value('string' ) ), 'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ), 'answers': datasets.Sequence( { 'text': datasets.Value('string' ), 'answer_start': datasets.Value('int32' ), } ), 'id': datasets.Value('int64' ), } ) UpperCAmelCase_ : Tuple = datasets.Dataset.from_dict( { 'tokens': [['foo'] * 5] * n, 'labels': [[1] * 5] * n, 'answers': [{'answer_start': [97], 'text': ['1976']}] * 10, 'id': list(range(__snake_case ) ), } , features=__snake_case , ) return dataset @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[str] ): '''simple docstring''' UpperCAmelCase_ : str = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' ) dataset.map(cache_file_name=__snake_case ) return filename # FILE_CONTENT + files __UpperCAmelCase = '\\n Text data.\n Second line of data.' @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt' UpperCAmelCase_ : Tuple = FILE_CONTENT with open(__snake_case , 'w' ) as f: f.write(__snake_case ) return filename @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[str] ): '''simple docstring''' import bza UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2' UpperCAmelCase_ : str = bytes(__snake_case , 'utf-8' ) with bza.open(__snake_case , 'wb' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Any ): '''simple docstring''' import gzip UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' ) UpperCAmelCase_ : Dict = bytes(__snake_case , 'utf-8' ) with gzip.open(__snake_case , 'wb' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Dict ): '''simple docstring''' if datasets.config.LZ4_AVAILABLE: import lza.frame UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4' UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' ) with lza.frame.open(__snake_case , 'wb' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Tuple , __snake_case : List[Any] ): '''simple docstring''' if datasets.config.PY7ZR_AVAILABLE: import pyazr UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z' with pyazr.SevenZipFile(__snake_case , 'w' ) as archive: archive.write(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any] ): '''simple docstring''' import tarfile UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.tar' with tarfile.TarFile(__snake_case , 'w' ) as f: f.add(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str ): '''simple docstring''' import lzma UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz' UpperCAmelCase_ : Any = 
bytes(__snake_case , 'utf-8' ) with lzma.open(__snake_case , 'wb' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ): '''simple docstring''' import zipfile UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] ): '''simple docstring''' if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'file.txt.zst' UpperCAmelCase_ : List[str] = bytes(__snake_case , 'utf-8' ) with zstd.open(__snake_case , 'wb' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Any ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml' UpperCAmelCase_ : List[Any] = textwrap.dedent( '\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' ) with open(__snake_case , 'w' ) as f: f.write(__snake_case ) return filename __UpperCAmelCase = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __UpperCAmelCase = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __UpperCAmelCase = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __UpperCAmelCase = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __UpperCAmelCase = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope='session' ) def lowercase__ ( ): '''simple docstring''' return DATA_DICT_OF_LISTS @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : int ): '''simple docstring''' UpperCAmelCase_ : List[Any] = datasets.Dataset.from_dict(__snake_case ) UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' ) dataset.map(cache_file_name=__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' ) with contextlib.closing(sqlitea.connect(__snake_case ) ) as con: UpperCAmelCase_ : List[Any] = con.cursor() cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' ) for item in DATA: cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope='session' ) def lowercase__ ( 
__snake_case : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' ) with open(__snake_case , 'w' , newline='' ) as f: UpperCAmelCase_ : Tuple = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] ) writer.writeheader() for item in DATA: writer.writerow(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' ) with open(__snake_case , 'w' , newline='' ) as f: UpperCAmelCase_ : Optional[Any] = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] ) writer.writeheader() for item in DATA: writer.writerow(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str , __snake_case : Any ): '''simple docstring''' import bza UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2' with open(__snake_case , 'rb' ) as f: UpperCAmelCase_ : int = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(__snake_case , 'wb' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple ): '''simple docstring''' UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) ) f.write(__snake_case , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Tuple , __snake_case : int , __snake_case : str ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) ) f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' ) UpperCAmelCase_ : Dict = pa.schema( { 'col_1': pa.string(), 'col_2': pa.intaa(), 'col_3': pa.floataa(), } ) with open(__snake_case , 'wb' ) as f: UpperCAmelCase_ : List[Any] = pq.ParquetWriter(__snake_case , schema=__snake_case ) UpperCAmelCase_ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__snake_case ) )] for k in DATA[0]} , schema=__snake_case ) writer.write_table(__snake_case ) writer.close() return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' ) UpperCAmelCase_ : Optional[int] = {'data': DATA} with open(__snake_case , 'w' ) as f: json.dump(__snake_case , __snake_case ) return path 
@pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' ) UpperCAmelCase_ : Tuple = {'data': DATA_DICT_OF_LISTS} with open(__snake_case , 'w' ) as f: json.dump(__snake_case , __snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' ) with open(__snake_case , 'w' ) as f: for item in DATA: f.write(json.dumps(__snake_case ) + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' ) with open(__snake_case , 'w' ) as f: for item in DATA: f.write(json.dumps(__snake_case ) + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : int ): '''simple docstring''' UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' ) with open(__snake_case , 'w' ) as f: for item in DATA_312: f.write(json.dumps(__snake_case ) + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' ) with open(__snake_case , 'w' ) as f: for item in DATA_STR: f.write(json.dumps(__snake_case ) + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Dict , __snake_case : Dict ): '''simple docstring''' import gzip UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' ) with open(__snake_case , 'rb' ) as orig_file: with gzip.open(__snake_case , 'wb' ) as zipped_file: zipped_file.writelines(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : int , __snake_case : Any ): '''simple docstring''' import gzip UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' ) with open(__snake_case , 'rb' ) as orig_file: with gzip.open(__snake_case , 'wb' ) as zipped_file: zipped_file.writelines(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ): '''simple docstring''' UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) ) f.write(__snake_case , 
arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar' with tarfile.TarFile(__snake_case , 'w' ) as f: f.add(__snake_case , arcname=os.path.basename(__snake_case ) ) f.add(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar' with tarfile.TarFile(__snake_case , 'w' ) as f: f.add(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : Any = ['0', '1', '2', '3'] UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' ) with open(__snake_case , 'w' ) as f: for item in data: f.write(item + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3'] UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' ) with open(__snake_case , 'w' ) as f: for item in data: f.write(item + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' UpperCAmelCase_ : Dict = ['0', '1', '2', '3'] UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc' with open(__snake_case , 'w' ) as f: for item in data: f.write(item + '\n' ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Dict , __snake_case : str , __snake_case : Any ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) ) f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename('unsupported.ext' ) ) f.write(__snake_case , arcname=os.path.basename('unsupported_2.ext' ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Dict ): '''simple docstring''' UpperCAmelCase_ : Tuple = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] ) UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' ) with 
open(__snake_case , 'w' , encoding='utf-8' ) as f: f.write(__snake_case ) return path @pytest.fixture(scope='session' ) def lowercase__ ( ): '''simple docstring''' return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' ) @pytest.fixture(scope='session' ) def lowercase__ ( ): '''simple docstring''' return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' ) @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : str , __snake_case : List[str] ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip' with zipfile.ZipFile(__snake_case , 'w' ) as f: f.write(__snake_case , arcname=os.path.basename(__snake_case ) ) f.write(__snake_case , arcname=os.path.basename(__snake_case ).replace('.jpg' , '2.jpg' ) ) return path @pytest.fixture(scope='session' ) def lowercase__ ( __snake_case : Any ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data_dir' ) (data_dir / "subdir").mkdir() with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f: f.write('foo\n' * 10 ) with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f: f.write('bar\n' * 10 ) # hidden file with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f: f.write('bar\n' * 10 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f: f.write('foo\n' * 10 ) with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f: f.write('bar\n' * 10 ) return data_dir
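# A minimal sketch of the fixture pattern used throughout this file: pytest's
# session-scoped `tmp_path_factory` builds one temporary data file that every
# test in the session can reuse (the file name here is illustrative):
import pytest

@pytest.fixture(scope='session' )
def tiny_text_file(tmp_path_factory):
    path = tmp_path_factory.mktemp('data' ) / 'tiny.txt'
    path.write_text('Text data.\nSecond line of data.' )
    return str(path )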
29
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__UpperCAmelCase = logging.get_logger(__name__)

__UpperCAmelCase = {
    'facebook/s2t-wav2vec2-large-en-de': (
        'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class lowerCamelCase (_snake_case ):
    '''simple docstring'''

    _snake_case : str = '''speech_to_text_2'''
    _snake_case : Optional[int] = ['''past_key_values''']
    _snake_case : str = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__( self , _UpperCamelCase=1_0_0_0_0 , _UpperCamelCase=6 , _UpperCamelCase=2_0_4_8 , _UpperCamelCase=4 , _UpperCamelCase=0.0 , _UpperCamelCase=True , _UpperCamelCase="relu" , _UpperCamelCase=2_5_6 , _UpperCamelCase=0.1 , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=0.02 , _UpperCamelCase=2 , _UpperCamelCase=True , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , _UpperCamelCase=1_0_2_4 , **_UpperCamelCase , ) -> Optional[Any]:
        UpperCAmelCase_ : Tuple = vocab_size
        UpperCAmelCase_ : Optional[int] = d_model
        UpperCAmelCase_ : Dict = decoder_ffn_dim
        UpperCAmelCase_ : int = decoder_layers
        UpperCAmelCase_ : Tuple = decoder_attention_heads
        UpperCAmelCase_ : str = dropout
        UpperCAmelCase_ : Union[str, Any] = attention_dropout
        UpperCAmelCase_ : List[str] = activation_dropout
        UpperCAmelCase_ : Any = activation_function
        UpperCAmelCase_ : List[Any] = init_std
        UpperCAmelCase_ : Optional[Any] = decoder_layerdrop
        UpperCAmelCase_ : List[str] = use_cache
        UpperCAmelCase_ : Dict = decoder_layers
        UpperCAmelCase_ : List[str] = scale_embedding  # scale factor will be sqrt(d_model) if True
        UpperCAmelCase_ : Optional[int] = max_target_positions
        super().__init__(
            pad_token_id=_UpperCamelCase ,
            bos_token_id=_UpperCamelCase ,
            eos_token_id=_UpperCamelCase ,
            decoder_start_token_id=_UpperCamelCase ,
            **_UpperCamelCase ,
        )
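# A usage sketch, assuming this is the upstream Speech2Text2Config (the names in
# this file are obfuscated, so the readable upstream class is used instead):
#
#     from transformers import Speech2Text2Config
#
#     config = Speech2Text2Config(d_model=2_5_6 , decoder_layers=6 )
#     print(config.num_attention_heads )  # resolved to decoder_attention_heads (default 4) via the attribute map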
29
from __future__ import annotations


def lowercase__ ( __snake_case : tuple[int, int] , __snake_case : int ):
    '''simple docstring'''
    UpperCAmelCase_ , UpperCAmelCase_ : Tuple = position
    UpperCAmelCase_ : str = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    UpperCAmelCase_ : Optional[Any] = []
    for position in positions:
        UpperCAmelCase_ , UpperCAmelCase_ : Tuple = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(__snake_case )
    return permissible_positions


def lowercase__ ( __snake_case : list[list[int]] ):
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row )


def lowercase__ ( __snake_case : list[list[int]] , __snake_case : tuple[int, int] , __snake_case : int ):
    '''simple docstring'''
    if is_complete(__snake_case ):
        return True
    for position in get_valid_pos(__snake_case , len(__snake_case ) ):
        UpperCAmelCase_ , UpperCAmelCase_ : Any = position
        if board[y][x] == 0:
            UpperCAmelCase_ : Optional[Any] = curr + 1
            if open_knight_tour_helper(__snake_case , __snake_case , curr + 1 ):
                return True
            UpperCAmelCase_ : List[Any] = 0
    return False


def lowercase__ ( __snake_case : int ):
    '''simple docstring'''
    UpperCAmelCase_ : str = [[0 for i in range(__snake_case )] for j in range(__snake_case )]
    for i in range(__snake_case ):
        for j in range(__snake_case ):
            UpperCAmelCase_ : Optional[Any] = 1
            if open_knight_tour_helper(__snake_case , (i, j) , 1 ):
                return board
            UpperCAmelCase_ : List[Any] = 0
    UpperCAmelCase_ : List[str] = F"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(__snake_case )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
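# A worked sketch using readable names in place of the repeated `lowercase__`
# definitions above (get_valid_pos / is_complete / open_knight_tour_helper /
# open_knight_tour):
#
#     open_knight_tour(1)  # -> [[1]]: a 1x1 board is trivially complete
#     open_knight_tour(5)  # -> a 5x5 board whose entries 1..25 trace one open tour
#     open_knight_tour(2)  # raises ValueError: a knight has no legal move on a 2x2 board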
29
1
import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ) -> int: UpperCAmelCase_ : int = 0 @slow def __UpperCAmelCase ( self ) -> List[Any]: for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(_UpperCamelCase ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(_UpperCamelCase ) , 0 ) def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def __UpperCAmelCase ( self ) -> Any: UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 2_0 ) def __UpperCAmelCase ( self ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained(_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) # Check that tokenizer_type ≠ model_type UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_UpperCamelCase , config=_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def __UpperCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_UpperCamelCase , 'vocab.txt' ) ) UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_UpperCamelCase , tokenizer_type='bert' , use_fast=_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_UpperCamelCase , 'vocab.json' ) ) shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_UpperCamelCase , 'merges.txt' ) ) UpperCAmelCase_ : 
Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCamelCase , tokenizer_type='gpt2' , use_fast=_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) @require_tokenizers def __UpperCAmelCase ( self ) -> Any: with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_UpperCamelCase , 'vocab.txt' ) ) UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_UpperCamelCase , tokenizer_type='bert' ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_UpperCamelCase , 'vocab.json' ) ) shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_UpperCamelCase , 'merges.txt' ) ) UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_UpperCamelCase , tokenizer_type='gpt2' ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> List[str]: with pytest.raises(_UpperCamelCase ): AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx' ) @require_tokenizers def __UpperCAmelCase ( self ) -> Optional[Any]: for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: UpperCAmelCase_ : Tuple = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' ) self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) ) if isinstance(_UpperCamelCase , _UpperCamelCase ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _UpperCamelCase ) else: self.assertEqual(tokenizer.do_lower_case , _UpperCamelCase ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) @require_tokenizers def __UpperCAmelCase ( self ) -> Any: for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( _UpperCamelCase , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ): UpperCAmelCase_ : Tuple = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' ) def __UpperCAmelCase ( self ) -> int: # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai UpperCAmelCase_ : Dict = TOKENIZER_MAPPING.values() UpperCAmelCase_ : Union[str, Any] = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(_UpperCamelCase ) @require_tokenizers def __UpperCAmelCase ( self ) -> List[str]: self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=_UpperCamelCase ) , _UpperCamelCase ) self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ) , _UpperCamelCase ) @require_tokenizers def __UpperCAmelCase ( self ) -> Dict: UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=_UpperCamelCase ) UpperCAmelCase_ : Tuple = 'Hello, world. How are you?' 
UpperCAmelCase_ : Optional[int] = tokenizer.tokenize(_UpperCamelCase ) self.assertEqual('[UNK]' , tokens[0] ) UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=_UpperCamelCase ) UpperCAmelCase_ : int = tokenizer.tokenize(_UpperCamelCase ) self.assertEqual('[UNK]' , tokens[0] ) @require_tokenizers def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' ) self.assertEqual(type(_UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 ) self.assertEqual(tokenizer.unk_token , '[UNK]' ) self.assertEqual(tokenizer.padding_side , 'right' ) self.assertEqual(tokenizer.truncation_side , 'right' ) def __UpperCAmelCase ( self ) -> List[str]: UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_UpperCamelCase ) UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 1_2 ) def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : str = AutoTokenizer.from_pretrained('ctrl' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> List[str]: # Check we can load the tokenizer config of an online model. UpperCAmelCase_ : int = get_tokenizer_config('bert-base-cased' ) UpperCAmelCase_ : Optional[Any] = config.pop('_commit_hash' , _UpperCamelCase ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(_UpperCamelCase , {'do_lower_case': False} ) # This model does not have a tokenizer_config so we get back an empty dict. UpperCAmelCase_ : Optional[int] = get_tokenizer_config(_UpperCamelCase ) self.assertDictEqual(_UpperCamelCase , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(_UpperCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_UpperCamelCase ) UpperCAmelCase_ : Tuple = get_tokenizer_config(_UpperCamelCase ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer' ) def __UpperCAmelCase ( self ) -> Union[str, Any]: try: AutoConfig.register('custom' , _UpperCamelCase ) AutoTokenizer.register(_UpperCamelCase , slow_tokenizer_class=_UpperCamelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_UpperCamelCase ): AutoTokenizer.register(_UpperCamelCase , slow_tokenizer_class=_UpperCamelCase ) UpperCAmelCase_ : int = CustomTokenizer.from_pretrained(_UpperCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def __UpperCAmelCase ( self ) -> Any: try: AutoConfig.register('custom' , _UpperCamelCase ) # Can register in two steps AutoTokenizer.register(_UpperCamelCase , slow_tokenizer_class=_UpperCamelCase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(_UpperCamelCase , fast_tokenizer_class=_UpperCamelCase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( _UpperCamelCase , slow_tokenizer_class=_UpperCamelCase , fast_tokenizer_class=_UpperCamelCase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_UpperCamelCase ): AutoTokenizer.register(_UpperCamelCase , fast_tokenizer_class=_UpperCamelCase ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : Any = BertTokenizerFast.from_pretrained(_UpperCamelCase ) bert_tokenizer.save_pretrained(_UpperCamelCase ) UpperCAmelCase_ : Dict = CustomTokenizerFast.from_pretrained(_UpperCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_UpperCamelCase ) UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_UpperCamelCase , use_fast=_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def __UpperCAmelCase ( self ) -> str: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_UpperCamelCase ): UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(_UpperCamelCase ): UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase ) UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_UpperCamelCase ) UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(_UpperCamelCase , trust_remote_code=_UpperCamelCase ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast' ) # Test we can also load the slow version UpperCAmelCase_ : int = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase , use_fast=_UpperCamelCase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_UpperCamelCase ) UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_UpperCamelCase , trust_remote_code=_UpperCamelCase , use_fast=_UpperCamelCase ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' ) @require_tokenizers def __UpperCAmelCase ( self ) -> Optional[Any]: class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Any = False class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Tuple = NewTokenizer _snake_case : int = False try: AutoConfig.register('custom' , _UpperCamelCase ) AutoTokenizer.register(_UpperCamelCase , slow_tokenizer_class=_UpperCamelCase ) AutoTokenizer.register(_UpperCamelCase , fast_tokenizer_class=_UpperCamelCase ) # If remote code is not set, the default is to use local UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=_UpperCamelCase ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase_ : str = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase , use_fast=_UpperCamelCase ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) self.assertTrue(tokenizer.special_attribute_present ) UpperCAmelCase_ : int = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase , use_fast=_UpperCamelCase ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_UpperCamelCase ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) # Test we can also load the slow version UpperCAmelCase_ : int = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_UpperCamelCase , use_fast=_UpperCamelCase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) else: self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) def __UpperCAmelCase ( self ) -> Dict: with self.assertRaisesRegex( _UpperCamelCase , 'bert-base is not a local folder and is not a valid model identifier' ): UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('bert-base' ) def __UpperCAmelCase ( self ) -> str: with self.assertRaisesRegex( _UpperCamelCase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_UpperCamelCase , revision='aaaaaa' ) def __UpperCAmelCase ( self ) -> Tuple: # Make sure we have cached the tokenizer. UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) with RequestCounter() as counter: UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
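# A minimal sketch of the registration API these tests exercise (the class names
# below are placeholders, not real model types or checkpoints):
from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer

class MyConfig(PretrainedConfig):
    model_type = 'my-model'

class MyTokenizer(PreTrainedTokenizer):
    pass

AutoConfig.register('my-model' , MyConfig )
AutoTokenizer.register(MyConfig , slow_tokenizer_class=MyTokenizer )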
29
def lowercase__ ( __snake_case : int ):
    '''simple docstring'''
    UpperCAmelCase_ : list[list[int]] = [[0 for _ in range(__snake_case )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        UpperCAmelCase_ : Optional[Any] = 1
    for n in range(m + 1 ):
        for k in range(1 , __snake_case ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            __UpperCAmelCase = int(input('Enter a number: ').strip())
            print(partition(n))
        except ValueError:
            print('Please enter a number.')
    else:
        try:
            __UpperCAmelCase = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('Please pass a number.')
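# A worked check of the DP above (using the readable name `partition` for the
# transformed `lowercase__`): partition(5) == 7, matching the seven integer
# partitions of 5: 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1.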
29
1
import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __UpperCAmelCase = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase=7 , _UpperCamelCase=3 , _UpperCamelCase=1_8 , _UpperCamelCase=3_0 , _UpperCamelCase=4_0_0 , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=None , ) -> Optional[int]: UpperCAmelCase_ : List[Any] = size if size is not None else {'height': 2_0, 'width': 2_0} UpperCAmelCase_ : str = parent UpperCAmelCase_ : Tuple = batch_size UpperCAmelCase_ : List[str] = num_channels UpperCAmelCase_ : Tuple = image_size UpperCAmelCase_ : Union[str, Any] = min_resolution UpperCAmelCase_ : Union[str, Any] = max_resolution UpperCAmelCase_ : Optional[int] = size UpperCAmelCase_ : Union[str, Any] = do_normalize UpperCAmelCase_ : Union[str, Any] = do_convert_rgb UpperCAmelCase_ : Dict = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6] UpperCAmelCase_ : Optional[int] = patch_size if patch_size is not None else {'height': 1_6, 'width': 1_6} def __UpperCAmelCase ( self ) -> Optional[Any]: return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __UpperCAmelCase ( self ) -> Any: UpperCAmelCase_ : List[str] = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg' UpperCAmelCase_ : int = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ).convert('RGB' ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCamelCase (_snake_case , unittest.TestCase ): '''simple docstring''' _snake_case : Dict = PixaStructImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : Union[str, Any] = PixaStructImageProcessingTester(self ) @property def __UpperCAmelCase ( self ) -> Any: return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCamelCase , 'do_normalize' ) ) self.assertTrue(hasattr(_UpperCamelCase , 'do_convert_rgb' ) ) def __UpperCAmelCase ( self ) -> int: UpperCAmelCase_ : Union[str, Any] = self.image_processor_tester.prepare_dummy_image() UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict ) UpperCAmelCase_ : List[str] = 2_0_4_8 UpperCAmelCase_ : List[str] = image_processor(_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06 ) , atol=1E-3 , rtol=1E-3 ) ) def __UpperCAmelCase ( self ) -> Union[str, Any]: # Initialize image_processor UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase ) for image in image_inputs: 
self.assertIsInstance(_UpperCamelCase , Image.Image ) # Test not batched input UpperCAmelCase_ : Dict = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase_ : Dict = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase_ : Optional[Any] = image_processor( _UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __UpperCAmelCase ( self ) -> Tuple: # Initialize image_processor UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase , Image.Image ) # Test not batched input UpperCAmelCase_ : List[str] = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 UpperCAmelCase_ : List[str] = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(_UpperCamelCase ): UpperCAmelCase_ : List[str] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches UpperCAmelCase_ : Any = 'Hello' UpperCAmelCase_ : Optional[int] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase , header_text=_UpperCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase_ : Any = image_processor( _UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase , header_text=_UpperCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __UpperCAmelCase ( self ) -> Any: # Initialize image_processor UpperCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase , np.ndarray ) UpperCAmelCase_ : List[Any] = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase_ : Optional[int] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase_ : int = image_processor( _UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __UpperCAmelCase ( self ) -> int: # Initialize image_processor UpperCAmelCase_ : Union[str, Any] = 
self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase , torch.Tensor ) # Test not batched input UpperCAmelCase_ : Any = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase_ : List[str] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase_ : List[str] = image_processor( _UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCamelCase (_snake_case , unittest.TestCase ): '''simple docstring''' _snake_case : List[str] = PixaStructImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = PixaStructImageProcessingTester(self , num_channels=4 ) UpperCAmelCase_ : int = 3 @property def __UpperCAmelCase ( self ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCamelCase , 'do_normalize' ) ) self.assertTrue(hasattr(_UpperCamelCase , 'do_convert_rgb' ) ) def __UpperCAmelCase ( self ) -> str: # Initialize image_processor UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase , Image.Image ) # Test not batched input UpperCAmelCase_ : List[str] = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase_ : List[str] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase_ : Any = image_processor( _UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
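# A quick sketch of the `expected_hidden_dim` arithmetic asserted above: each
# flattened patch carries all of its pixel values plus two position ids
# (row, col), so with the default 16x16 patches and 3 channels:
patch_height, patch_width, num_channels = 1_6, 1_6, 3
print(patch_height * patch_width * num_channels + 2 )  # 770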
29
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

__UpperCAmelCase = logging.get_logger(__name__)


@add_end_docstrings(_snake_case )
class lowerCamelCase (_snake_case ):
    '''simple docstring'''

    def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
        super().__init__(*_UpperCamelCase , **_UpperCamelCase )
        self.check_model_type(_UpperCamelCase )

    def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> List[Any]:
        UpperCAmelCase_ , UpperCAmelCase_ : Tuple = {}, {}
        if padding is not None:
            UpperCAmelCase_ : List[str] = padding
        if truncation is not None:
            UpperCAmelCase_ : Tuple = truncation
        if top_k is not None:
            UpperCAmelCase_ : Dict = top_k
        return preprocess_params, {}, postprocess_params

    def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> int:
        if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
            UpperCAmelCase_ : Optional[Any] = {'image': image, 'question': question}
        else:
            UpperCAmelCase_ : List[str] = image
        UpperCAmelCase_ : Optional[Any] = super().__call__(_UpperCamelCase , **_UpperCamelCase )
        return results

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ) -> Optional[Any]:
        UpperCAmelCase_ : List[Any] = load_image(inputs['image'] )
        UpperCAmelCase_ : Dict = self.tokenizer(
            inputs['question'] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase )
        UpperCAmelCase_ : int = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework )
        model_inputs.update(_UpperCamelCase )
        return model_inputs

    def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
        UpperCAmelCase_ : Any = self.model(**_UpperCamelCase )
        return model_outputs

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> str:
        if top_k > self.model.config.num_labels:
            UpperCAmelCase_ : Union[str, Any] = self.model.config.num_labels
        if self.framework == "pt":
            UpperCAmelCase_ : List[str] = model_outputs.logits.sigmoid()[0]
            UpperCAmelCase_ , UpperCAmelCase_ : str = probs.topk(_UpperCamelCase )
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )
        UpperCAmelCase_ : Optional[Any] = scores.tolist()
        UpperCAmelCase_ : Tuple = ids.tolist()
        return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
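# A usage sketch of this pipeline via the high-level factory (the image path and
# question are illustrative; assumes a visual-question-answering checkpoint such
# as a ViLT model is available):
#
#     from transformers import pipeline
#
#     vqa = pipeline('visual-question-answering' )
#     vqa(image='photo.jpg' , question='What color is the car?' , top_k=3 )
#     # -> [{'score': ..., 'answer': ...}, ...]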
29
1
def lowercase__ ( __snake_case : int , __snake_case : int ): '''simple docstring''' if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive' ) UpperCAmelCase_ : Tuple = str(bin(__snake_case ) )[2:] # remove the leading "0b" UpperCAmelCase_ : Union[str, Any] = str(bin(__snake_case ) )[2:] # remove the leading "0b" UpperCAmelCase_ : List[Any] = max(len(__snake_case ) , len(__snake_case ) ) return "0b" + "".join( str(int(char_a == '1' and char_b == '1' ) ) for char_a, char_b in zip(a_binary.zfill(__snake_case ) , b_binary.zfill(__snake_case ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
29
import os


# Precomputes a list of the 100 first triangular numbers
__UpperCAmelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def lowercase__ ( ):
    '''simple docstring'''
    UpperCAmelCase_ : Any = os.path.dirname(os.path.realpath(__snake_case ) )
    UpperCAmelCase_ : Optional[Any] = os.path.join(__snake_case , 'words.txt' )
    UpperCAmelCase_ : Union[str, Any] = ''
    with open(__snake_case ) as f:
        UpperCAmelCase_ : List[Any] = f.readline()
    UpperCAmelCase_ : Optional[int] = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
    UpperCAmelCase_ : Optional[int] = [
        word
        for word in [sum(ord(__snake_case ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(__snake_case )


if __name__ == "__main__":
    print(solution())
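# A worked example of the scoring rule above (ord(ch) - 64 maps 'A'..'Z' to
# 1..26): the classic case is "SKY" -> 19 + 11 + 25 = 55, the 10th triangular
# number, so "SKY" counts as a triangular word.
print(sum(ord(ch ) - 64 for ch in 'SKY' ))  # 55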
29
1
from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : str = '''time_series_transformer''' _snake_case : Optional[int] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = "student_t" , _UpperCamelCase = "nll" , _UpperCamelCase = 1 , _UpperCamelCase = [1, 2, 3, 4, 5, 6, 7] , _UpperCamelCase = "mean" , _UpperCamelCase = 0 , _UpperCamelCase = 0 , _UpperCamelCase = 0 , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 2 , _UpperCamelCase = 2 , _UpperCamelCase = 2 , _UpperCamelCase = 2 , _UpperCamelCase = True , _UpperCamelCase = "gelu" , _UpperCamelCase = 6_4 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 0.02 , _UpperCamelCase=True , **_UpperCamelCase , ) -> Optional[int]: # time series specific configuration UpperCAmelCase_ : Any = prediction_length UpperCAmelCase_ : int = context_length or prediction_length UpperCAmelCase_ : Tuple = distribution_output UpperCAmelCase_ : Optional[int] = loss UpperCAmelCase_ : Optional[Any] = input_size UpperCAmelCase_ : List[Any] = num_time_features UpperCAmelCase_ : str = lags_sequence UpperCAmelCase_ : Union[str, Any] = scaling UpperCAmelCase_ : List[Any] = num_dynamic_real_features UpperCAmelCase_ : Tuple = num_static_real_features UpperCAmelCase_ : Tuple = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(_UpperCamelCase ) != num_static_categorical_features: raise ValueError( 'The cardinality should be a list of the same length as `num_static_categorical_features`' ) UpperCAmelCase_ : str = cardinality else: UpperCAmelCase_ : List[str] = [0] if embedding_dimension and num_static_categorical_features > 0: if len(_UpperCamelCase ) != num_static_categorical_features: raise ValueError( 'The embedding dimension should be a list of the same length as `num_static_categorical_features`' ) UpperCAmelCase_ : Any = embedding_dimension else: UpperCAmelCase_ : str = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality] UpperCAmelCase_ : int = num_parallel_samples # Transformer architecture configuration UpperCAmelCase_ : str = input_size * len(_UpperCamelCase ) + self._number_of_features UpperCAmelCase_ : Optional[Any] = d_model UpperCAmelCase_ : List[Any] = encoder_attention_heads UpperCAmelCase_ : List[Any] = decoder_attention_heads UpperCAmelCase_ : int = encoder_ffn_dim UpperCAmelCase_ : Any = decoder_ffn_dim UpperCAmelCase_ : Dict = encoder_layers UpperCAmelCase_ : Tuple = decoder_layers UpperCAmelCase_ : Union[str, Any] = dropout UpperCAmelCase_ : Optional[Any] = attention_dropout UpperCAmelCase_ : Optional[Any] = activation_dropout UpperCAmelCase_ : List[str] = encoder_layerdrop UpperCAmelCase_ : List[str] = decoder_layerdrop UpperCAmelCase_ : int = 
activation_function UpperCAmelCase_ : List[Any] = init_std UpperCAmelCase_ : Optional[Any] = use_cache super().__init__(is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase ) @property def __UpperCAmelCase ( self ) -> int: return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
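# A minimal sketch instantiating this configuration through its public transformers
# class; the keyword values are illustrative, not tuned settings.
from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(
    prediction_length=24,                 # forecast horizon
    context_length=48,                    # history fed to the encoder
    lags_sequence=[1, 2, 3, 4, 5, 6, 7],
    num_time_features=2,
    d_model=32,
)
print(config.context_length, config.d_model)  # 48 32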
29
import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem __UpperCAmelCase = importlib.util.find_spec('s3fs') is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 __UpperCAmelCase = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def lowercase__ ( __snake_case : str ): '''simple docstring''' if "://" in dataset_path: UpperCAmelCase_ : int = dataset_path.split('://' )[1] return dataset_path def lowercase__ ( __snake_case : fsspec.AbstractFileSystem ): '''simple docstring''' if fs is not None and fs.protocol != "file": return True else: return False def lowercase__ ( __snake_case : fsspec.AbstractFileSystem , __snake_case : str , __snake_case : str ): '''simple docstring''' UpperCAmelCase_ : List[str] = not is_remote_filesystem(__snake_case ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(__snake_case ) , fs._strip_protocol(__snake_case ) ) else: fs.mv(__snake_case , __snake_case , recursive=__snake_case ) def lowercase__ ( ): '''simple docstring''' if hasattr(fsspec.asyn , 'reset_lock' ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : int = threading.Lock()
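# A self-contained sketch of the path-stripping helper defined above; the descriptive
# function name is added here for readability and is not taken from the sample.
def extract_path_from_uri(dataset_path: str) -> str:
    # Drop the protocol prefix (e.g. "s3://") when one is present.
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path

print(extract_path_from_uri("s3://my-bucket/datasets/train"))  # my-bucket/datasets/train
print(extract_path_from_uri("relative/local/path"))            # unchanged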
29
1
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    '''simple docstring'''
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, 'words.txt')
    words = ''
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip('\r\n').split(',')]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
29
def cocktail_shaker_sort(unsorted: list):
    '''simple docstring'''
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(f'{cocktail_shaker_sort(unsorted) = }')
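# Example run of the bidirectional sort above: each iteration bubbles the largest
# remaining value to the right and the smallest to the left.
data = [5, 1, 4, 2, 8, 0]
print(cocktail_shaker_sort(data))  # [0, 1, 2, 4, 5, 8]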
29
1
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')
    # positional
    parser.add_argument(
        'training_script',
        type=str,
        help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ),
    )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
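# Sketch of the entry point the launcher above expects in the target script, plus a
# typical invocation; the file name, flags, and function body are illustrative
# assumptions.
#   python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 3e-5
import torch_xla.core.xla_model as xm

def _mp_fn(index):
    # One process per TPU core; `index` identifies the spawned process.
    device = xm.xla_device()
    print(f"process {index} running on {device}")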
0
from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n' def lowercase__ ( __snake_case : List[str] , __snake_case : int , __snake_case : Tuple=8 ): '''simple docstring''' UpperCAmelCase_ : Dict = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCAmelCase_ : List[Any] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def lowercase__ ( __snake_case : Any , __snake_case : int=512 , __snake_case : Dict=512 ): '''simple docstring''' UpperCAmelCase_ : Tuple = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) UpperCAmelCase_ : Dict = np.array(pil_image.convert('RGB' ) ) UpperCAmelCase_ : Any = arr.astype(np.floataa ) / 127.5 - 1 UpperCAmelCase_ : Dict = np.transpose(__snake_case , [2, 0, 1] ) UpperCAmelCase_ : List[str] = torch.from_numpy(__snake_case ).unsqueeze(0 ) return image class lowerCamelCase (_snake_case ): '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Union[str, Any]: super().__init__() self.register_modules( unet=_UpperCamelCase , scheduler=_UpperCamelCase , movq=_UpperCamelCase , ) UpperCAmelCase_ : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict: # get the original timestep using init_timestep UpperCAmelCase_ : Any = min(int(num_inference_steps * strength ) , _UpperCamelCase ) UpperCAmelCase_ : List[Any] = max(num_inference_steps - init_timestep , 0 ) UpperCAmelCase_ : str = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> Tuple: if not isinstance(_UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is 
{type(_UpperCamelCase )}" ) UpperCAmelCase_ : List[str] = image.to(device=_UpperCamelCase , dtype=_UpperCamelCase ) UpperCAmelCase_ : List[str] = batch_size * num_images_per_prompt if image.shape[1] == 4: UpperCAmelCase_ : List[str] = image else: if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(_UpperCamelCase )}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) elif isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : Any = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCamelCase ) ] UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase , dim=0 ) else: UpperCAmelCase_ : Union[str, Any] = self.movq.encode(_UpperCamelCase ).latent_dist.sample(_UpperCamelCase ) UpperCAmelCase_ : int = self.movq.config.scaling_factor * init_latents UpperCAmelCase_ : Optional[int] = torch.cat([init_latents] , dim=0 ) UpperCAmelCase_ : Tuple = init_latents.shape UpperCAmelCase_ : List[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase ) # get latents UpperCAmelCase_ : str = self.scheduler.add_noise(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = init_latents return latents def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Any: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) UpperCAmelCase_ : Optional[Any] = torch.device(f"cuda:{gpu_id}" ) UpperCAmelCase_ : Optional[Any] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]: if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' ) UpperCAmelCase_ : str = torch.device(f"cuda:{gpu_id}" ) if self.device.type != "cpu": self.to('cpu' , silence_dtype_warnings=_UpperCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase_ : Dict = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCAmelCase_ , UpperCAmelCase_ : Dict = cpu_offload_with_hook(_UpperCamelCase , _UpperCamelCase , prev_module_hook=_UpperCamelCase ) # We'll offload the last model manually. 
UpperCAmelCase_ : Any = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __UpperCAmelCase ( self ) -> Dict: if not hasattr(self.unet , '_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(_UpperCamelCase , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_UpperCamelCase ) def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 4.0 , _UpperCamelCase = 0.3 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> str: UpperCAmelCase_ : Any = self._execution_device UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0 if isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : str = torch.cat(_UpperCamelCase , dim=0 ) UpperCAmelCase_ : Optional[Any] = image_embeds.shape[0] if isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase_ : int = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 ) UpperCAmelCase_ : int = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 ) UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase ) if not isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : Tuple = [image] if not all(isinstance(_UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( f"Input is in incorrect format: {[type(_UpperCamelCase ) for i in image]}. 
Currently, we only support PIL image and pytorch tensor" ) UpperCAmelCase_ : str = torch.cat([prepare_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for i in image] , dim=0 ) UpperCAmelCase_ : Any = image.to(dtype=image_embeds.dtype , device=_UpperCamelCase ) UpperCAmelCase_ : List[str] = self.movq.encode(_UpperCamelCase )['latents'] UpperCAmelCase_ : List[Any] = latents.repeat_interleave(_UpperCamelCase , dim=0 ) self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt ) UpperCAmelCase_ , UpperCAmelCase_ : str = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor ) UpperCAmelCase_ : Dict = self.prepare_latents( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase ) for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase_ : str = {'image_embeds': image_embeds} UpperCAmelCase_ : Union[str, Any] = self.unet( sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0] if do_classifier_free_guidance: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 ) UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 ) UpperCAmelCase_ , UpperCAmelCase_ : str = variance_pred.chunk(2 ) UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , 'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): UpperCAmelCase_ , UpperCAmelCase_ : int = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ : List[str] = self.scheduler.step( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0] # post-processing UpperCAmelCase_ : Optional[Any] = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" ) if output_type in ["np", "pil"]: UpperCAmelCase_ : List[str] = image * 0.5 + 0.5 UpperCAmelCase_ : List[Any] = image.clamp(0 , 1 ) UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase_ : List[Any] = self.numpy_to_pil(_UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_UpperCamelCase )
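# A worked example of the latent-size rounding performed by downscale_height_and_width
# above, assuming the usual movq scale factor of 8.
height, width, scale_factor = 768, 768, 8
new_h = height // scale_factor**2 + (height % scale_factor**2 != 0)  # 768 // 64 = 12, no remainder
new_w = width // scale_factor**2 + (width % scale_factor**2 != 0)
print(new_h * scale_factor, new_w * scale_factor)  # 96 96 -> the spatial size of the latents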
29
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_: List[str] =logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = SwinConfig( embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , ) UpperCAmelCase_ = DetaConfig( backbone_config=snake_case_ , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=snake_case_ , with_box_refine=snake_case_ , two_stage=snake_case_ , ) # set labels UpperCAmelCase_ = "huggingface/label-files" if "o365" in model_name: UpperCAmelCase_ = 3_66 UpperCAmelCase_ = "object365-id2label.json" else: UpperCAmelCase_ = 91 UpperCAmelCase_ = "coco-detection-id2label.json" UpperCAmelCase_ = num_labels UpperCAmelCase_ = json.load(open(cached_download(hf_hub_url(snake_case_ , snake_case_ , repo_type="dataset" ) ) , "r" ) ) UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()} UpperCAmelCase_ = idalabel UpperCAmelCase_ = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( snake_case_ : int ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = [] # stem # fmt: off rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") ) rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") ) rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", 
f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") ) rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") ) rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") ) rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") ) rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") ) rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") ) 
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") ) # fmt: on return rename_keys def lowerCAmelCase_ ( 
snake_case_ : Tuple , snake_case_ : int , snake_case_ : Optional[int] ) -> int: '''simple docstring''' UpperCAmelCase_ = dct.pop(snake_case_ ) UpperCAmelCase_ = val def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): UpperCAmelCase_ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) UpperCAmelCase_ = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" ) UpperCAmelCase_ = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[:dim, :] UpperCAmelCase_ = in_proj_bias[: dim] UpperCAmelCase_ = in_proj_weight[ dim : dim * 2, : ] UpperCAmelCase_ = in_proj_bias[ dim : dim * 2 ] UpperCAmelCase_ = in_proj_weight[ -dim :, : ] UpperCAmelCase_ = in_proj_bias[-dim :] # fmt: on def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : Dict ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention UpperCAmelCase_ = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCAmelCase_ = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[:hidden_size, :] UpperCAmelCase_ = in_proj_bias[:hidden_size] UpperCAmelCase_ = in_proj_weight[ hidden_size : hidden_size * 2, : ] UpperCAmelCase_ = in_proj_bias[hidden_size : hidden_size * 2] UpperCAmelCase_ = in_proj_weight[-hidden_size:, :] UpperCAmelCase_ = in_proj_bias[-hidden_size:] def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : str , snake_case_ : str ) -> int: '''simple docstring''' UpperCAmelCase_ = get_deta_config(snake_case_ ) # load original state dict if model_name == "deta-swin-large": UpperCAmelCase_ = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" ) elif model_name == "deta-swin-large-o365": UpperCAmelCase_ = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" ) else: raise ValueError(f"""Model name {model_name} not supported""" ) UpperCAmelCase_ = torch.load(snake_case_ , map_location="cpu" )["model"] # original state dict for name, param in state_dict.items(): print(snake_case_ , param.shape ) # rename keys UpperCAmelCase_ = create_rename_keys(snake_case_ ) for src, dest in rename_keys: rename_key(snake_case_ , snake_case_ , snake_case_ ) read_in_swin_q_k_v(snake_case_ , config.backbone_config ) read_in_decoder_q_k_v(snake_case_ , snake_case_ ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val if "input_proj" in key: UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val if "level_embed" 
in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val # finally, create HuggingFace model and load state dict UpperCAmelCase_ = DetaForObjectDetection(snake_case_ ) model.load_state_dict(snake_case_ ) model.eval() UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" model.to(snake_case_ ) # load image processor UpperCAmelCase_ = DetaImageProcessor(format="coco_detection" ) # verify our conversion on image UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = processor(images=snake_case_ , return_tensors="pt" ) UpperCAmelCase_ = encoding["pixel_values"] UpperCAmelCase_ = model(pixel_values.to(snake_case_ ) ) # verify logits print("Logits:" , outputs.logits[0, :3, :3] ) print("Boxes:" , outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": UpperCAmelCase_ = torch.tensor( [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] ) UpperCAmelCase_ = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] ) elif model_name == "deta-swin-large-o365": UpperCAmelCase_ = torch.tensor( [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] ) UpperCAmelCase_ = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] ) assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(snake_case_ ) , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(snake_case_ ) , atol=1E-4 ) print("Everything ok!" ) if pytorch_dump_folder_path: # Save model and processor logger.info(f"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) model.save_pretrained(snake_case_ ) processor.save_pretrained(snake_case_ ) # Push to hub if push_to_hub: print("Pushing model and processor to hub..." ) model.push_to_hub(f"""jozhang97/{model_name}""" ) processor.push_to_hub(f"""jozhang97/{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: Dict =argparse.ArgumentParser() parser.add_argument( '--model_name', type=str, default='deta-swin-large', choices=['deta-swin-large', 'deta-swin-large-o365'], help='Name of the model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) SCREAMING_SNAKE_CASE_: Optional[int] =parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
1
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def lowercase__ ( __snake_case : List[Any] , __snake_case : List[str]=False ): '''simple docstring''' try: UpperCAmelCase_ : int = os.environ[key] except KeyError: # KEY isn't set, default to `default`. UpperCAmelCase_ : Optional[int] = default else: # KEY is set, convert it to True or False. try: UpperCAmelCase_ : List[Any] = strtobool(__snake_case ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"If set, {key} must be yes or no." ) return _value __UpperCAmelCase = parse_flag_from_env('RUN_SLOW', default=False) def lowercase__ ( __snake_case : int ): '''simple docstring''' return unittest.skip('Test was skipped' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__snake_case ) def lowercase__ ( __snake_case : List[str] ): '''simple docstring''' return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__snake_case ) def lowercase__ ( __snake_case : List[str] ): '''simple docstring''' return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(__snake_case ) def lowercase__ ( __snake_case : str ): '''simple docstring''' return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__snake_case ) def lowercase__ ( __snake_case : str ): '''simple docstring''' return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__snake_case ) def lowercase__ ( __snake_case : Dict ): '''simple docstring''' return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__snake_case ) def lowercase__ ( __snake_case : Dict ): '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(__snake_case ) def lowercase__ ( __snake_case : Optional[int] ): '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__snake_case ) def lowercase__ ( __snake_case : int ): '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__snake_case ) def lowercase__ ( __snake_case : Dict ): '''simple docstring''' return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__snake_case ) def lowercase__ ( __snake_case : Tuple ): '''simple docstring''' return 
unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(__snake_case ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__snake_case ) def lowercase__ ( __snake_case : Dict=None , __snake_case : Dict=None ): '''simple docstring''' if test_case is None: return partial(__snake_case , version=__snake_case ) return unittest.skipUnless(is_torch_version('>=' , __snake_case ) , F"test requires torch version >= {version}" )(__snake_case ) def lowercase__ ( __snake_case : str ): '''simple docstring''' return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__snake_case ) def lowercase__ ( __snake_case : List[str] ): '''simple docstring''' return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__snake_case ) def lowercase__ ( __snake_case : str ): '''simple docstring''' return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__snake_case ) __UpperCAmelCase = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__snake_case ) class lowerCamelCase (unittest.TestCase ): '''simple docstring''' _snake_case : Union[str, Any] = True @classmethod def __UpperCAmelCase ( cls ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = tempfile.mkdtemp() @classmethod def __UpperCAmelCase ( cls ) -> List[str]: if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def __UpperCAmelCase ( self ) -> str: if self.clear_on_setup: for path in Path(self.tmpdir ).glob('**/*' ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(_UpperCamelCase ) class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ) -> Optional[int]: super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any: UpperCAmelCase_ : List[Any] = mocks if isinstance(_UpperCamelCase , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def lowercase__ ( __snake_case : int ): '''simple docstring''' UpperCAmelCase_ : int = AcceleratorState() UpperCAmelCase_ : str = tensor[None].clone().to(state.device ) UpperCAmelCase_ : List[str] = gather(__snake_case ).cpu() UpperCAmelCase_ : List[Any] = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , __snake_case ): return False return True class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any: UpperCAmelCase_ : str = returncode UpperCAmelCase_ : Optional[Any] = stdout UpperCAmelCase_ : Optional[Any] = stderr async def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Optional[int] ): '''simple docstring''' while True: UpperCAmelCase_ : Dict = await stream.readline() if line: callback(__snake_case ) else: break async def lowercase__ ( __snake_case : Optional[int] , __snake_case : Dict=None , __snake_case : str=None , __snake_case : Dict=None , __snake_case : List[str]=False , __snake_case : Optional[int]=False ): '''simple docstring''' if echo: print('\nRunning: ' , ' '.join(__snake_case ) ) UpperCAmelCase_ : Optional[Any] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__snake_case , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__snake_case , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) UpperCAmelCase_ : Any = [] UpperCAmelCase_ : str = [] def tee(__snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Optional[int]="" ): UpperCAmelCase_ : List[str] = line.decode('utf-8' ).rstrip() sink.append(__snake_case ) if not quiet: print(__snake_case , __snake_case , file=__snake_case ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda __snake_case : tee(__snake_case , __snake_case , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda __snake_case : tee(__snake_case , __snake_case , sys.stderr , label='stderr:' ) ) ), ] , timeout=__snake_case , ) return _RunOutput(await p.wait() , __snake_case , __snake_case ) def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[Any]=None , __snake_case : str=None , __snake_case : Tuple=180 , __snake_case : Dict=False , __snake_case : Optional[Any]=True ): '''simple docstring''' UpperCAmelCase_ : str = asyncio.get_event_loop() UpperCAmelCase_ : int = loop.run_until_complete( _stream_subprocess(__snake_case , env=__snake_case , stdin=__snake_case , timeout=__snake_case , quiet=__snake_case , echo=__snake_case ) ) UpperCAmelCase_ : int = ' '.join(__snake_case ) if result.returncode > 0: UpperCAmelCase_ : int = '\n'.join(result.stderr ) raise RuntimeError( F"'{cmd_str}' failed with returncode {result.returncode}\n\n" F"The combined stderr from workers follows:\n{stderr}" ) return result class lowerCamelCase (_snake_case ): '''simple docstring''' pass def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any]=False ): '''simple docstring''' try: UpperCAmelCase_ : List[Any] = subprocess.check_output(__snake_case , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(__snake_case , 'decode' ): UpperCAmelCase_ : str = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"Command `{' '.join(__snake_case )}` failed with the following error:\n\n{e.output.decode()}" ) from e
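# Sketch of the environment-flag convention the test utilities above build on;
# RUN_SLOW mirrors the sample, and the value set here is illustrative.
import os
from distutils.util import strtobool

os.environ["RUN_SLOW"] = "yes"
run_slow = bool(strtobool(os.environ.get("RUN_SLOW", "no")))
print(run_slow)  # True -> slow tests are executed instead of skipped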
29
0
'''simple docstring''' from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class __lowerCAmelCase : '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = BlenderbotConfig lowerCAmelCase__ : Dict = {} lowerCAmelCase__ : List[Any] = """gelu""" def __init__(self : str , UpperCamelCase : Dict , UpperCamelCase : Any=13 , UpperCamelCase : List[Any]=7 , UpperCamelCase : str=True , UpperCamelCase : Dict=False , UpperCamelCase : Optional[Any]=99 , UpperCamelCase : Dict=32 , UpperCamelCase : int=2 , UpperCamelCase : str=4 , UpperCamelCase : Any=37 , UpperCamelCase : str=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : Dict=20 , UpperCamelCase : Union[str, Any]=2 , UpperCamelCase : Optional[int]=1 , UpperCamelCase : Optional[int]=0 , ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = eos_token_id lowercase__ = pad_token_id lowercase__ = bos_token_id def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowercase__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowercase__ = tf.concat([input_ids, eos_tensor] , axis=1 ) lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowercase__ = prepare_blenderbot_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, inputs_dict def UpperCamelCase__ (self : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : Dict ): '''simple docstring''' lowercase__ = TFBlenderbotModel(config=UpperCamelCase ).get_decoder() lowercase__ = inputs_dict['''input_ids'''] lowercase__ = input_ids[:1, :] lowercase__ = inputs_dict['''attention_mask'''][:1, :] lowercase__ = inputs_dict['''head_mask'''] lowercase__ = 1 # first forward pass lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , head_mask=UpperCamelCase , use_cache=UpperCamelCase ) lowercase__ ,lowercase__ = outputs.to_tuple() # create hypothetical next token and extent to 
next_input_ids lowercase__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowercase__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowercase__ = tf.concat([input_ids, next_tokens] , axis=-1 ) lowercase__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase )[0] lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , past_key_values=UpperCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowercase__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowercase__ = output_from_no_past[:, -3:, random_slice_idx] lowercase__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(UpperCamelCase , UpperCamelCase , rtol=1E-3 ) def _SCREAMING_SNAKE_CASE (A , A , A , A=None , A=None , A=None , A=None , A=None , ) -> Any: """simple docstring""" if attention_mask is None: lowercase__ = tf.cast(tf.math.not_equal(A , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowercase__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowercase__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowercase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowercase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __lowerCAmelCase (lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : str = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () lowerCAmelCase__ : Tuple = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () lowerCAmelCase__ : List[Any] = ( { """conversational""": TFBlenderbotForConditionalGeneration, """feature-extraction""": TFBlenderbotModel, """summarization""": TFBlenderbotForConditionalGeneration, """text2text-generation""": TFBlenderbotForConditionalGeneration, """translation""": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) lowerCAmelCase__ : Optional[Any] = True lowerCAmelCase__ : Optional[Any] = False lowerCAmelCase__ : str = False def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = TFBlenderbotModelTester(self ) lowercase__ = ConfigTester(self , config_class=UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase ) @require_tokenizers @require_tf class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : int = ["""My friends are cool but they eat too many carbs."""] lowerCAmelCase__ : Any = """facebook/blenderbot-400M-distill""" @cached_property def UpperCamelCase__ (self : List[str] ): '''simple docstring''' return 
BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ = self.tokenizer(self.src_text , return_tensors='''tf''' ) lowercase__ = self.model.generate( model_inputs.input_ids , ) lowercase__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
2
import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed __UpperCAmelCase = logging.getLogger(__name__) def lowercase__ ( __snake_case : List[Any]=2 , __snake_case : Union[str, Any]=3 , __snake_case : Any=16 , __snake_case : int = 10 , __snake_case : int = 2 ): '''simple docstring''' def get_dataset(__snake_case : Optional[Any] ): UpperCAmelCase_ : Optional[Any] = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(__snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) UpperCAmelCase_ : Any = get_dataset(__snake_case ) UpperCAmelCase_ : str = get_dataset(__snake_case ) UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 ) UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 ) return (train_dataloader, valid_dataloader) def lowercase__ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple=None ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = [] for epoch in range(__snake_case ): # Train quickly model.train() for batch in dataloader: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = batch UpperCAmelCase_ : List[Any] = model(__snake_case ) UpperCAmelCase_ : int = torch.nn.functional.mse_loss(__snake_case , __snake_case ) accelerator.backward(__snake_case ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class lowerCamelCase (nn.Module ): '''simple docstring''' def __init__( self ) -> Optional[Any]: super().__init__() UpperCAmelCase_ : List[Any] = nn.Parameter(torch.randn(1 ) ) UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn(1 ) ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[Any]: return x * self.a + self.b class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ) -> Dict: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Tuple = DummyModel() UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders() UpperCAmelCase_ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCamelCase , automatic_checkpoint_naming=_UpperCamelCase ) # Train baseline UpperCAmelCase_ : Dict = Accelerator(project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def __UpperCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Optional[Any] = DummyModel() UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders() # Train baseline UpperCAmelCase_ : Tuple = Accelerator() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ 
, UpperCAmelCase_ : Optional[Any] = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' ) accelerator.save_state(_UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item() UpperCAmelCase_ : Dict = optimizer.state_dict() UpperCAmelCase_ : Union[str, Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item() UpperCAmelCase_ : Any = optimizer.state_dict() # Train partially set_seed(4_2 ) UpperCAmelCase_ : int = DummyModel() UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders() UpperCAmelCase_ : Optional[Any] = Accelerator() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) accelerator.load_state(_UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item() UpperCAmelCase_ : Optional[Any] = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save everything UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' ) accelerator.save_state(_UpperCamelCase ) # Load everything back in and make sure all states work accelerator.load_state(_UpperCamelCase ) test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item() UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Tuple = DummyModel() UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders() UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase ) # Train baseline UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial accelerator.save_state() ((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item() UpperCAmelCase_ : Optional[int] = optimizer.state_dict() UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item() UpperCAmelCase_ : Optional[int] = optimizer.state_dict() # Train partially set_seed(4_2 ) UpperCAmelCase_ : Any = DummyModel() UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = 
dummy_dataloaders() UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase ) UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item() UpperCAmelCase_ : List[Any] = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) ) test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item() UpperCAmelCase_ : Dict = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> Dict: UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] ) UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] ) UpperCAmelCase_ : Union[str, Any] = DummyModel() UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() ) UpperCAmelCase_ : Any = Accelerator() with self.assertRaises(_UpperCamelCase ) as ve: accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Optional[int] = str(ve.exception ) self.assertTrue('Item at index 0' in message ) self.assertTrue('Item at index 1' in message ) self.assertFalse('Item at index 2' in message ) self.assertFalse('Item at index 3' in message ) def __UpperCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : int = DummyModel() UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders() UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase ) # Train baseline UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial accelerator.save_state() UpperCAmelCase_ : Dict = scheduler.state_dict() train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) self.assertEqual(_UpperCamelCase , scheduler.state_dict() ) def __UpperCAmelCase ( self ) -> Dict: with 
tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Optional[int] = DummyModel() UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 ) # Train baseline UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase ) # Save 3 states: for _ in range(1_1 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) ) @require_cuda def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : List[str] = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() ) if __name__ == "__main__": __UpperCAmelCase = '/tmp/accelerate/state_checkpointing' __UpperCAmelCase = DummyModel() __UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3) __UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9) __UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders() __UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline __UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no') if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: __UpperCAmelCase = group['params'][0].device break assert param_device.type == accelerator.device.type __UpperCAmelCase = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu') for group in optimizer.param_groups: __UpperCAmelCase = group['params'][0].device break assert ( param_device.type == torch.device('cpu').type ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device') for group in optimizer.param_groups: __UpperCAmelCase = group['params'][0].device break assert ( param_device.type == accelerator.device.type ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match='Unsupported optimizer map location passed'): accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid') accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
29
0
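A minimal, self-contained sketch of the save/load round-trip the checkpointing tests above exercise; the throwaway project directory and the one-parameter model are illustrative, not part of the original test suite.

import os
import tempfile

import torch
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

# Illustrative setup: a temporary project dir and a trivial model.
project_dir = tempfile.mkdtemp()
accelerator = Accelerator(
    project_config=ProjectConfiguration(project_dir=project_dir, automatic_checkpoint_naming=True)
)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state()  # written to {project_dir}/checkpoints/checkpoint_0
before = model.weight.detach().clone()
with torch.no_grad():
    model.weight.add_(1.0)  # perturb the weights
accelerator.load_state(os.path.join(project_dir, "checkpoints", "checkpoint_0"))
assert torch.allclose(model.weight.detach(), before)  # state restored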
'''simple docstring''' import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A ( __snake_case , unittest.TestCase ): __magic_name__ = CLIPTokenizer __magic_name__ = CLIPTokenizerFast __magic_name__ = True __magic_name__ = {} __magic_name__ = False def __lowerCAmelCase ( self ) -> Any: """simple docstring""" super().setUp() # fmt: off A : List[Any] = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on A : Tuple = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) ) A : Any = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>'''] A : List[Any] = {'''unk_token''': '''<unk>'''} A : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : str = '''lower newer''' A : int = '''lower newer''' return input_text, output_text def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) A : Optional[Any] = '''lower newer''' A : int = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>'''] A : List[str] = tokenizer.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[Any] = tokens + [tokenizer.unk_token] A : Any = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) @require_ftfy def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A : int = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : List[str] = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.''' A : Any = tokenizer_s.tokenize(SCREAMING_SNAKE_CASE ) A : List[Any] = tokenizer_r.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 
different ways A : List[Any] = '''xa\u0303y''' + ''' ''' + '''x\xe3y''' A : Union[str, Any] = tokenizer_s.tokenize(SCREAMING_SNAKE_CASE ) A : Dict = tokenizer_r.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Test that the tokenization is identical on unicode of space type A : Tuple = [ '''\u0009''', # (horizontal tab, '\t') '''\u000B''', # (vertical tab) '''\u000C''', # (form feed) '''\u0020''', # (space, ' ') '''\u200E''', # (left-to-right mark):w '''\u200F''', # (right-to-left mark) ] for unicode_seq in spaces_unicodes: A : List[str] = tokenizer_s.tokenize(SCREAMING_SNAKE_CASE ) A : List[Any] = tokenizer_r.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Test that the tokenization is identical on unicode of line break type A : int = [ '''\u000A''', # (line feed, '\n') '''\r\n''', # (carriage return and line feed, '\r\n') '''\u000D''', # (carriage return, '\r') '''\r''', # (carriage return, '\r') '''\u000D''', # (carriage return, '\r') '''\u2028''', # (line separator) '''\u2029''', # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: A : List[str] = tokenizer_s.tokenize(SCREAMING_SNAKE_CASE ) A : Tuple = tokenizer_r.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A : int = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` A : List[Any] = F'{text_of_1_token} {text_of_1_token}' A : Optional[Any] = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE , ) A : Optional[int] = tokenizer_r(SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE ) + 1, len(SCREAMING_SNAKE_CASE ) + 1 + len(SCREAMING_SNAKE_CASE )) , ) A : Any = F' {text}' A : Dict = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE , ) A : int = tokenizer_r(SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE ) + 1, 1 + len(SCREAMING_SNAKE_CASE ) + 1 + len(SCREAMING_SNAKE_CASE )) , ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE ) as context: self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' ) self.assertTrue( context.exception.args[0].startswith( '''The `backend_tokenizer` provided does not match the expected format.''' ) ) @require_ftfy def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" super().test_tokenization_python_rust_equals() def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" pass
3
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
29
0
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1_055.05585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
4
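Two quick spot checks of the conversion above (the table stores joules per unit, so a conversion is value * J_from / J_to); these assume the cleaned-up names ENERGY_CONVERSION / energy_conversion used in the module.

print(energy_conversion("kilowatthour", "kilojoule", 1))  # 3600.0
print(energy_conversion("joule", "calorie_nutr", 4186.8))  # 1.0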
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
29
0
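The three implementations above only assume nodes exposing .val and .next, so a minimal illustrative node class is enough to try them; note that is_palindrome splits its input list in place, so build a fresh list per call.

class ListNode:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt

head = ListNode(1, ListNode(2, ListNode(2, ListNode(1))))
print(is_palindrome(head))  # True
head = ListNode(1, ListNode(2, ListNode(3)))
print(is_palindrome_dict(head))  # False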
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase__ = 16 UpperCAmelCase__ = 32 def UpperCAmelCase_ ( __snake_case , __snake_case = 16 ) -> str: """simple docstring""" _lowercase =AutoTokenizer.from_pretrained('''bert-base-cased''' ) _lowercase =load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__snake_case ): # max_length=None => use the model max length (it's actually the default) _lowercase =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _lowercase =datasets.map( __snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowercase =tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__snake_case ): # On TPU it's best to pad everything to the same length or training will be very slow. _lowercase =128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _lowercase =16 elif accelerator.mixed_precision != "no": _lowercase =8 else: _lowercase =None return tokenizer.pad( __snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , ) # Instantiate dataloaders. 
_lowercase =DataLoader( tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) _lowercase =DataLoader( tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase__ = mocked_dataloaders # noqa: F811 def UpperCAmelCase_ ( __snake_case , __snake_case ) -> List[Any]: """simple docstring""" if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __snake_case ) == "1": _lowercase =2 # New Code # _lowercase =int(args.gradient_accumulation_steps ) _lowercase =int(args.local_sgd_steps ) # Initialize accelerator _lowercase =Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__snake_case ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowercase =config['''lr'''] _lowercase =int(config['''num_epochs'''] ) _lowercase =int(config['''seed'''] ) _lowercase =int(config['''batch_size'''] ) _lowercase =evaluate.load('''glue''' , '''mrpc''' ) set_seed(__snake_case ) _lowercase , _lowercase =get_dataloaders(__snake_case , __snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowercase =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _lowercase =model.to(accelerator.device ) # Instantiate optimizer _lowercase =AdamW(params=model.parameters() , lr=__snake_case ) # Instantiate scheduler _lowercase =get_linear_schedule_with_warmup( optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _lowercase , _lowercase , _lowercase , _lowercase , _lowercase =accelerator.prepare( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # Now we train the model for epoch in range(__snake_case ): model.train() with LocalSGD( accelerator=__snake_case , model=__snake_case , local_sgd_steps=__snake_case , enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(__snake_case ): _lowercase =model(**__snake_case ) _lowercase =output.loss accelerator.backward(__snake_case ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _lowercase =model(**__snake_case ) _lowercase =outputs.logits.argmax(dim=-1 ) _lowercase , _lowercase =accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__snake_case , references=__snake_case , ) _lowercase =metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"epoch {epoch}:" , __snake_case ) def UpperCAmelCase_ ( ) -> Optional[int]: """simple docstring""" _lowercase =argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=__snake_case , default=__snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) # New Code # parser.add_argument( '''--gradient_accumulation_steps''' , type=__snake_case , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , ) parser.add_argument( '''--local_sgd_steps''' , type=__snake_case , default=8 , help='''Number of local SGD steps or None to disable local SGD''' ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) _lowercase =parser.parse_args() _lowercase ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(__snake_case , __snake_case ) if __name__ == "__main__": main()
5
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
29
0
import random


def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
        157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
        239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
        331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
        421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
        509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
        709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
        821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
        919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
6
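A quick sanity check of the probabilistic test above against plain trial division on small odd numbers; trial_division is illustrative only and not part of the module.

def trial_division(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n**0.5) + 1))

for n in range(5, 1000, 2):
    assert is_prime_low_num(n) == trial_division(n), n
print(generate_large_prime(keysize=128).bit_length())  # 128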
__UpperCAmelCase = { 'Pillow': 'Pillow<10.0.0', 'accelerate': 'accelerate>=0.20.3', 'av': 'av==9.2.0', 'beautifulsoup4': 'beautifulsoup4', 'black': 'black~=23.1', 'codecarbon': 'codecarbon==1.2.0', 'cookiecutter': 'cookiecutter==1.7.3', 'dataclasses': 'dataclasses', 'datasets': 'datasets!=2.5.0', 'decord': 'decord==0.6.0', 'deepspeed': 'deepspeed>=0.9.3', 'diffusers': 'diffusers', 'dill': 'dill<0.3.5', 'evaluate': 'evaluate>=0.2.0', 'fairscale': 'fairscale>0.3', 'faiss-cpu': 'faiss-cpu', 'fastapi': 'fastapi', 'filelock': 'filelock', 'flax': 'flax>=0.4.1,<=0.7.0', 'ftfy': 'ftfy', 'fugashi': 'fugashi>=1.0', 'GitPython': 'GitPython<3.1.19', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0', 'importlib_metadata': 'importlib_metadata', 'ipadic': 'ipadic>=1.0.0,<2.0', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13', 'jaxlib': 'jaxlib>=0.1.65,<=0.4.13', 'jieba': 'jieba', 'kenlm': 'kenlm', 'keras-nlp': 'keras-nlp>=0.3.1', 'librosa': 'librosa', 'nltk': 'nltk', 'natten': 'natten>=0.14.6', 'numpy': 'numpy>=1.17', 'onnxconverter-common': 'onnxconverter-common', 'onnxruntime-tools': 'onnxruntime-tools>=1.4.2', 'onnxruntime': 'onnxruntime>=1.4.0', 'opencv-python': 'opencv-python', 'optuna': 'optuna', 'optax': 'optax>=0.0.8,<=0.1.4', 'packaging': 'packaging>=20.0', 'parameterized': 'parameterized', 'phonemizer': 'phonemizer', 'protobuf': 'protobuf', 'psutil': 'psutil', 'pyyaml': 'pyyaml>=5.1', 'pydantic': 'pydantic<2', 'pytest': 'pytest>=7.2.0', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'python': 'python>=3.8.0', 'ray[tune]': 'ray[tune]', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'rhoknp': 'rhoknp>=1.1.0,<1.3.1', 'rjieba': 'rjieba', 'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1', 'ruff': 'ruff>=0.0.241,<=0.0.259', 'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0', 'sacremoses': 'sacremoses', 'safetensors': 'safetensors>=0.3.1', 'sagemaker': 'sagemaker>=2.31.0', 'scikit-learn': 'scikit-learn', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'sigopt': 'sigopt', 'starlette': 'starlette', 'sudachipy': 'sudachipy>=0.6.6', 'sudachidict_core': 'sudachidict_core>=20220729', 'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14', 'tensorflow': 'tensorflow>=2.6,<2.14', 'tensorflow-text': 'tensorflow-text<2.14', 'tf2onnx': 'tf2onnx', 'timeout-decorator': 'timeout-decorator', 'timm': 'timm', 'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14', 'torch': 'torch>=1.9,!=1.12.0', 'torchaudio': 'torchaudio', 'torchvision': 'torchvision', 'pyctcdecode': 'pyctcdecode>=0.4.0', 'tqdm': 'tqdm>=4.27', 'unidic': 'unidic>=1.0.2', 'unidic_lite': 'unidic_lite>=1.0.7', 'urllib3': 'urllib3<2.0.0', 'uvicorn': 'uvicorn', }
29
0
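A sketch of how such a name-to-pin table is typically consumed, e.g. to build install_requires entries; deps and deps_list here are hypothetical stand-ins, with a two-entry table in place of the full one above.

deps = {"numpy": "numpy>=1.17", "tqdm": "tqdm>=4.27"}  # stand-in for the full table

def deps_list(*pkgs: str) -> list:
    # Look up the pinned requirement string for each package name.
    return [deps[p] for p in pkgs]

print(deps_list("numpy", "tqdm"))  # ['numpy>=1.17', 'tqdm>=4.27']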
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
7
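The classic worked example for this problem: 13195 factors as 5 * 7 * 13 * 29, so the function returns 29; for the default input the answer is 6857.

print(solution(13195))  # 29
print(solution())       # 6857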
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : "DiagonalGaussianDistribution" class lowerCamelCase (_snake_case , _snake_case ): '''simple docstring''' _snake_case : Optional[int] = True @register_to_config def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]: super().__init__() # pass init params to Encoder UpperCAmelCase_ : List[str] = Encoder( in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , ) # pass init params to Decoder UpperCAmelCase_ : Dict = Decoder( in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , ) UpperCAmelCase_ : Any = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) UpperCAmelCase_ : List[Any] = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 ) UpperCAmelCase_ : Any = False UpperCAmelCase_ : int = False # only relevant if vae tiling is enabled UpperCAmelCase_ : Optional[int] = self.config.sample_size UpperCAmelCase_ : int = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) UpperCAmelCase_ : Optional[Any] = 0.25 def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]: if isinstance(_UpperCamelCase , (Encoder, Decoder) ): UpperCAmelCase_ : Union[str, Any] = value def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int: UpperCAmelCase_ : Tuple = use_tiling def __UpperCAmelCase ( self ) -> Dict: self.enable_tiling(_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ : str = True def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : Optional[int] = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]: UpperCAmelCase_ : Optional[int] = {} def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if hasattr(_UpperCamelCase , 'set_processor' ): UpperCAmelCase_ : Optional[int] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) return processors def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = 
len(self.attn_processors.keys() ) if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if hasattr(_UpperCamelCase , 'set_processor' ): if not isinstance(_UpperCamelCase , _UpperCamelCase ): module.set_processor(_UpperCamelCase ) else: module.set_processor(processor.pop(f"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> Union[str, Any]: self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput: if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase ) if self.use_slicing and x.shape[0] > 1: UpperCAmelCase_ : Union[str, Any] = [self.encoder(_UpperCamelCase ) for x_slice in x.split(1 )] UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase ) else: UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase ) UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase ) UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase ) UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase ) @apply_forward_hook def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: if self.use_slicing and z.shape[0] > 1: UpperCAmelCase_ : List[str] = [self._decode(_UpperCamelCase ).sample for z_slice in z.split(1 )] UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase ) else: UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any: UpperCAmelCase_ : Tuple = min(a.shape[2] , b.shape[2] , _UpperCamelCase ) for y in range(_UpperCamelCase ): UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict: UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , _UpperCamelCase ) for x in range(_UpperCamelCase ): UpperCAmelCase_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput: UpperCAmelCase_ : Any = 
int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor ) UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. UpperCAmelCase_ : List[str] = [] for i in range(0 , x.shape[2] , _UpperCamelCase ): UpperCAmelCase_ : Any = [] for j in range(0 , x.shape[3] , _UpperCamelCase ): UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase ) UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase ) row.append(_UpperCamelCase ) rows.append(_UpperCamelCase ) UpperCAmelCase_ : str = [] for i, row in enumerate(_UpperCamelCase ): UpperCAmelCase_ : List[Any] = [] for j, tile in enumerate(_UpperCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase ) if j > 0: UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) ) UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 ) UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor ) UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. 
UpperCAmelCase_ : Union[str, Any] = [] for i in range(0 , z.shape[2] , _UpperCamelCase ): UpperCAmelCase_ : List[str] = [] for j in range(0 , z.shape[3] , _UpperCamelCase ): UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase ) UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase ) row.append(_UpperCamelCase ) rows.append(_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = [] for i, row in enumerate(_UpperCamelCase ): UpperCAmelCase_ : List[Any] = [] for j, tile in enumerate(_UpperCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase ) if j > 0: UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) ) UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]: UpperCAmelCase_ : Optional[Any] = sample UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist if sample_posterior: UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase ) else: UpperCAmelCase_ : int = posterior.mode() UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase )
29
0
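The blend_v/blend_h helpers above crossfade adjacent tiles linearly across the overlap region; blend_1d below is an illustrative one-dimensional sketch of the same ramp (assumes torch).

import torch

def blend_1d(a: torch.Tensor, b: torch.Tensor, blend: int) -> torch.Tensor:
    # Fade the last `blend` samples of `a` into the first `blend` samples of `b`.
    blend = min(a.shape[-1], b.shape[-1], blend)
    out = b.clone()
    for x in range(blend):
        w = x / blend
        out[..., x] = a[..., -blend + x] * (1 - w) + b[..., x] * w
    return out

print(blend_1d(torch.ones(8), torch.zeros(8), 4))
# tensor([1.0000, 0.7500, 0.5000, 0.2500, 0.0000, 0.0000, 0.0000, 0.0000])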
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError('''Quantized models are not supported.''' ) snake_case_ = re.match(R'''^mobilenet_v1_([^_]*)_([^_]*)$''' , SCREAMING_SNAKE_CASE__ ) if matches: snake_case_ = float(matches[1] ) snake_case_ = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". snake_case_ = 1001 snake_case_ = '''imagenet-1k-id2label.json''' snake_case_ = '''huggingface/label-files''' snake_case_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) ) snake_case_ = {int(SCREAMING_SNAKE_CASE__ ) + 1: v for k, v in idalabel.items()} snake_case_ = '''background''' snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} return config def __SCREAMING_SNAKE_CASE (): snake_case_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' snake_case_ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ) return im @torch.no_grad() def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ): snake_case_ = get_mobilenet_va_config(SCREAMING_SNAKE_CASE__ ) # Load 🤗 model snake_case_ = MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE__ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor snake_case_ = MobileNetVaImageProcessor( crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , ) snake_case_ = image_processor(images=prepare_img() , return_tensors='''pt''' ) snake_case_ = model(**SCREAMING_SNAKE_CASE__ ) snake_case_ = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": snake_case_ = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": snake_case_ = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: snake_case_ = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if push_to_hub: print('''Pushing to the hub...''' ) snake_case_ = '''google/''' + model_name image_processor.push_to_hub(SCREAMING_SNAKE_CASE__ ) model.push_to_hub(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''mobilenet_v1_1.0_224''', type=str, help='''Name of the MobileNetV1 model you\'d like to convert. 
Should in the form \'mobilenet_v1_<depth>_<size>\'.''', ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCAmelCase_ = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
8
def lowercase__ ( __snake_case : int , __snake_case : int ): '''simple docstring''' if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive' ) UpperCAmelCase_ : Tuple = str(bin(__snake_case ) )[2:] # remove the leading "0b" UpperCAmelCase_ : Union[str, Any] = str(bin(__snake_case ) )[2:] # remove the leading "0b" UpperCAmelCase_ : List[Any] = max(len(__snake_case ) , len(__snake_case ) ) return "0b" + "".join( str(int(char_a == '1' and char_b == '1' ) ) for char_a, char_b in zip(a_binary.zfill(__snake_case ) , b_binary.zfill(__snake_case ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
29
0
from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class _lowercase ( A__ ): '''simple docstring''' def __init__( self :List[str] , lowerCAmelCase__ :Distribution , lowerCAmelCase__ :str=None , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :Any=0 ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : Optional[Any] = 1.0 if scale is None else scale __SCREAMING_SNAKE_CASE : List[str] = 0.0 if loc is None else loc super().__init__(lowerCAmelCase__ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCAmelCase__ )] ) @property def __magic_name__( self :List[Any] ) -> Any: return self.base_dist.mean * self.scale + self.loc @property def __magic_name__( self :List[str] ) -> List[Any]: return self.base_dist.variance * self.scale**2 @property def __magic_name__( self :Union[str, Any] ) -> Optional[int]: return self.variance.sqrt() class _lowercase ( nn.Module ): '''simple docstring''' def __init__( self :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Dict[str, int] , lowerCAmelCase__ :Callable[..., Tuple[torch.Tensor]] , **lowerCAmelCase__ :Tuple ) -> None: super().__init__(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = args_dim __SCREAMING_SNAKE_CASE : Tuple = nn.ModuleList([nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ ) for dim in args_dim.values()] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = domain_map def __magic_name__( self :int , lowerCAmelCase__ :torch.Tensor ) -> Tuple[torch.Tensor]: __SCREAMING_SNAKE_CASE : str = [proj(lowerCAmelCase__ ) for proj in self.proj] return self.domain_map(*lowerCAmelCase__ ) class _lowercase ( nn.Module ): '''simple docstring''' def __init__( self :List[str] , lowerCAmelCase__ :Dict ) -> Dict: super().__init__() __SCREAMING_SNAKE_CASE : int = function def __magic_name__( self :int , lowerCAmelCase__ :Optional[int] , *lowerCAmelCase__ :Optional[int] ) -> Optional[Any]: return self.function(lowerCAmelCase__ , *lowerCAmelCase__ ) class _lowercase : '''simple docstring''' SCREAMING_SNAKE_CASE__ : type SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : Dict[str, int] def __init__( self :List[str] , lowerCAmelCase__ :int = 1 ) -> None: __SCREAMING_SNAKE_CASE : str = dim __SCREAMING_SNAKE_CASE : str = {k: dim * self.args_dim[k] for k in self.args_dim} def __magic_name__( self :Dict , lowerCAmelCase__ :int ) -> Dict: if self.dim == 1: return self.distribution_class(*lowerCAmelCase__ ) else: return Independent(self.distribution_class(*lowerCAmelCase__ ) , 1 ) def __magic_name__( self :Tuple , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[torch.Tensor] = None , lowerCAmelCase__ :Optional[torch.Tensor] = None , ) -> Distribution: __SCREAMING_SNAKE_CASE : List[Any] = self._base_distribution(lowerCAmelCase__ ) if loc is None and scale is None: return distr else: return AffineTransformed(lowerCAmelCase__ , loc=lowerCAmelCase__ , scale=lowerCAmelCase__ , event_dim=self.event_dim ) @property def __magic_name__( self :int ) -> Tuple: return () if self.dim == 1 else (self.dim,) @property def __magic_name__( self :int ) -> int: return len(self.event_shape ) @property def __magic_name__( self :Union[str, Any] ) -> float: return 0.0 def __magic_name__( self :List[Any] , lowerCAmelCase__ :int ) -> nn.Module: return ParameterProjection( in_features=lowerCAmelCase__ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def __magic_name__( self :int 
, *lowerCAmelCase__ :torch.Tensor ) -> List[str]: raise NotImplementedError() @staticmethod def __magic_name__( lowerCAmelCase__ :torch.Tensor ) -> torch.Tensor: return (x + torch.sqrt(torch.square(lowerCAmelCase__ ) + 4.0 )) / 2.0 class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} SCREAMING_SNAKE_CASE__ : type = StudentT @classmethod def __magic_name__( cls :List[Any] , lowerCAmelCase__ :torch.Tensor , lowerCAmelCase__ :torch.Tensor , lowerCAmelCase__ :torch.Tensor ) -> List[Any]: __SCREAMING_SNAKE_CASE : Optional[Any] = cls.squareplus(lowerCAmelCase__ ).clamp_min(torch.finfo(scale.dtype ).eps ) __SCREAMING_SNAKE_CASE : str = 2.0 + cls.squareplus(lowerCAmelCase__ ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict[str, int] = {"loc": 1, "scale": 1} SCREAMING_SNAKE_CASE__ : type = Normal @classmethod def __magic_name__( cls :Union[str, Any] , lowerCAmelCase__ :torch.Tensor , lowerCAmelCase__ :torch.Tensor ) -> Any: __SCREAMING_SNAKE_CASE : Dict = cls.squareplus(lowerCAmelCase__ ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict[str, int] = {"total_count": 1, "logits": 1} SCREAMING_SNAKE_CASE__ : type = NegativeBinomial @classmethod def __magic_name__( cls :List[str] , lowerCAmelCase__ :torch.Tensor , lowerCAmelCase__ :torch.Tensor ) -> List[Any]: __SCREAMING_SNAKE_CASE : List[Any] = cls.squareplus(lowerCAmelCase__ ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :Optional[int] ) -> Distribution: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = distr_args if self.dim == 1: return self.distribution_class(total_count=lowerCAmelCase__ , logits=lowerCAmelCase__ ) else: return Independent(self.distribution_class(total_count=lowerCAmelCase__ , logits=lowerCAmelCase__ ) , 1 ) def __magic_name__( self :Optional[int] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[torch.Tensor] = None , lowerCAmelCase__ :Optional[torch.Tensor] = None ) -> Distribution: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
9
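The squareplus map used by the domain projections above, (x + sqrt(x^2 + 4)) / 2, is a smooth transform that keeps distribution parameters strictly positive; a quick standalone check (assumes torch).

import torch

def squareplus(x: torch.Tensor) -> torch.Tensor:
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0

print(squareplus(torch.tensor([-5.0, 0.0, 5.0])))
# tensor([0.1926, 1.0000, 5.1926]) -- strictly positive, ~x for large positive x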
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'vocab.txt'} __UpperCAmelCase = { 'vocab_file': { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt', } } __UpperCAmelCase = { 'YituTech/conv-bert-base': 512, 'YituTech/conv-bert-medium-small': 512, 'YituTech/conv-bert-small': 512, } __UpperCAmelCase = { 'YituTech/conv-bert-base': {'do_lower_case': True}, 'YituTech/conv-bert-medium-small': {'do_lower_case': True}, 'YituTech/conv-bert-small': {'do_lower_case': True}, } class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Optional[int] = VOCAB_FILES_NAMES _snake_case : int = PRETRAINED_VOCAB_FILES_MAP _snake_case : Dict = PRETRAINED_INIT_CONFIGURATION _snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : Any = ConvBertTokenizer def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict: super().__init__( _UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , ) UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _UpperCamelCase ) != do_lower_case or normalizer_state.get('strip_accents' , _UpperCamelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _UpperCamelCase ) != tokenize_chinese_chars ): UpperCAmelCase_ : Any = getattr(_UpperCamelCase , normalizer_state.pop('type' ) ) UpperCAmelCase_ : str = do_lower_case UpperCAmelCase_ : List[Any] = strip_accents UpperCAmelCase_ : str = tokenize_chinese_chars UpperCAmelCase_ : Tuple = normalizer_class(**_UpperCamelCase ) UpperCAmelCase_ : Any = do_lower_case def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[str]: UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]: UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id] UpperCAmelCase_ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]: UpperCAmelCase_ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase ) return tuple(_UpperCamelCase )
29
0
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''

    def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Any:
        '''simple docstring'''
        lowerCamelCase__: Optional[Any] =[[1, 2, 4], [1, 2, 3, 4]]
        lowerCamelCase__: Any =DisjunctiveConstraint(UpperCAmelCase_)
        self.assertTrue(isinstance(dc.token_ids , UpperCAmelCase_))

        with self.assertRaises(UpperCAmelCase_):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(UpperCAmelCase_):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def SCREAMING_SNAKE_CASE_ (self : int) ->List[str]:
        '''simple docstring'''
        lowerCamelCase__: str =[[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(UpperCAmelCase_):
            DisjunctiveConstraint(UpperCAmelCase_)  # fails here

    def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
        '''simple docstring'''
        lowerCamelCase__: Tuple =[[1, 2, 3], [1, 2, 4]]
        lowerCamelCase__: List[Any] =DisjunctiveConstraint(UpperCAmelCase_)

        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =dc.update(1)
        lowerCamelCase__: str =stepped is True and completed is False and reset is False
        self.assertTrue(UpperCAmelCase_)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: List[str] =dc.update(2)
        lowerCamelCase__: Optional[Any] =stepped is True and completed is False and reset is False
        self.assertTrue(UpperCAmelCase_)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =dc.update(3)
        lowerCamelCase__: Tuple =stepped is True and completed is True and reset is False
        self.assertTrue(UpperCAmelCase_)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def SCREAMING_SNAKE_CASE_ (self : List[str]) ->str:
        '''simple docstring'''
        lowerCamelCase__: List[str] =[[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        lowerCamelCase__: Dict =DisjunctiveConstraint(UpperCAmelCase_)

        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
10
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__UpperCAmelCase = logging.get_logger(__name__)

__UpperCAmelCase = {
    'snap-research/efficientformer-l1-300': (
        'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
    ),
}


class lowerCamelCase (_snake_case ):
    '''simple docstring'''
    _snake_case : Optional[int] = '''efficientformer'''

    def __init__( self , _UpperCamelCase = [3, 2, 6, 4] , _UpperCamelCase = [4_8, 9_6, 2_2_4, 4_4_8] , _UpperCamelCase = [True, True, True, True] , _UpperCamelCase = 4_4_8 , _UpperCamelCase = 3_2 , _UpperCamelCase = 4 , _UpperCamelCase = 7 , _UpperCamelCase = 5 , _UpperCamelCase = 8 , _UpperCamelCase = 4 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1_6 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 2 , _UpperCamelCase = 1 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1 , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1E-5 , _UpperCamelCase = "gelu" , _UpperCamelCase = 0.02 , _UpperCamelCase = 1E-12 , _UpperCamelCase = 2_2_4 , _UpperCamelCase = 1E-05 , **_UpperCamelCase , ) -> None:
        super().__init__(**_UpperCamelCase )

        UpperCAmelCase_ : int = hidden_act
        UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
        UpperCAmelCase_ : Tuple = hidden_sizes
        UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
        UpperCAmelCase_ : List[str] = num_attention_heads
        UpperCAmelCase_ : List[Any] = initializer_range
        UpperCAmelCase_ : int = layer_norm_eps
        UpperCAmelCase_ : List[str] = patch_size
        UpperCAmelCase_ : Union[str, Any] = num_channels
        UpperCAmelCase_ : Optional[Any] = depths
        UpperCAmelCase_ : List[Any] = mlp_expansion_ratio
        UpperCAmelCase_ : List[str] = downsamples
        UpperCAmelCase_ : List[Any] = dim
        UpperCAmelCase_ : Tuple = key_dim
        UpperCAmelCase_ : Optional[int] = attention_ratio
        UpperCAmelCase_ : str = resolution
        UpperCAmelCase_ : Dict = pool_size
        UpperCAmelCase_ : Union[str, Any] = downsample_patch_size
        UpperCAmelCase_ : List[str] = downsample_stride
        UpperCAmelCase_ : List[str] = downsample_pad
        UpperCAmelCase_ : Any = drop_path_rate
        UpperCAmelCase_ : Dict = num_metaad_blocks
        UpperCAmelCase_ : Dict = distillation
        UpperCAmelCase_ : int = use_layer_scale
        UpperCAmelCase_ : Any = layer_scale_init_value
        UpperCAmelCase_ : Any = image_size
        UpperCAmelCase_ : Dict = batch_norm_eps
29
0
def _UpperCAmelCase (UpperCamelCase__ : int = 2000000 ):
    _A : Tuple = [0 for i in range(n + 1 )]
    _A : Union[str, Any] = 1
    _A : Optional[Any] = 1

    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            # mark every multiple of the prime i as composite; the sieve step must be i, not n
            for j in range(i * i , n + 1 , i ):
                _A : Union[str, Any] = 1
    _A : Union[str, Any] = 0
    for i in range(UpperCamelCase__ ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
11
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel

from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
    BaseOutput,
    is_accelerate_available,
    logging,
    randn_tensor,
    replace_example_docstring,
)
from .renderer import ShapERenderer


__UpperCAmelCase = logging.get_logger(__name__)  # pylint: disable=invalid-name

__UpperCAmelCase = '\n    Examples:\n    ```py\n    >>> from PIL import Image\n    >>> import torch\n    >>> from diffusers import DiffusionPipeline\n    >>> from diffusers.utils import export_to_gif, load_image\n\n    >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n    >>> repo = "openai/shap-e-img2img"\n    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n    >>> pipe = pipe.to(device)\n\n    >>> guidance_scale = 3.0\n    >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n    >>> image = load_image(image_url).convert("RGB")\n\n    >>> images = pipe(\n    ...     image,\n    ...     guidance_scale=guidance_scale,\n    ...     num_inference_steps=64,\n    ...     frame_size=256,\n    ... ).images\n\n    >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n    ```\n'


@dataclass
class lowerCamelCase (_snake_case ):
    '''simple docstring'''
    _snake_case : Union[PIL.Image.Image, np.ndarray]


class lowerCamelCase (_snake_case ):
    '''simple docstring'''

    def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any:
        super().__init__()

        self.register_modules(
            prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , )

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
        if latents is None:
            UpperCAmelCase_ : str = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            UpperCAmelCase_ : Tuple = latents.to(_UpperCamelCase )

        UpperCAmelCase_ : Tuple = latents * scheduler.init_noise_sigma
        return latents

    def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )

        UpperCAmelCase_ : int = torch.device(f"cuda:{gpu_id}" )

        UpperCAmelCase_ : int = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(_UpperCamelCase , _UpperCamelCase )

    @property
    def __UpperCAmelCase ( self ) -> int:
        if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(_UpperCamelCase , '_hf_hook' )
                and hasattr(module._hf_hook , 'execution_device' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> str:
        if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ):
            UpperCAmelCase_ : int = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_UpperCamelCase , axis=0 )

        if not isinstance(_UpperCamelCase , torch.Tensor ):
            UpperCAmelCase_ : Optional[int] = self.image_processor(_UpperCamelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )

        UpperCAmelCase_ : Tuple = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase )

        UpperCAmelCase_ : Optional[Any] = self.image_encoder(_UpperCamelCase )['last_hidden_state']
        UpperCAmelCase_ : Union[str, Any] = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        UpperCAmelCase_ : List[str] = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )

        if do_classifier_free_guidance:
            UpperCAmelCase_ : Dict = torch.zeros_like(_UpperCamelCase )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            UpperCAmelCase_ : Optional[int] = torch.cat([negative_image_embeds, image_embeds] )

        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(_UpperCamelCase )
    def __call__( self , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = 2_5 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 4.0 , _UpperCamelCase = 6_4 , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> Union[str, Any]:
        if isinstance(_UpperCamelCase , PIL.Image.Image ):
            UpperCAmelCase_ : Tuple = 1
        elif isinstance(_UpperCamelCase , torch.Tensor ):
            UpperCAmelCase_ : str = image.shape[0]
        elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
            UpperCAmelCase_ : Optional[int] = len(_UpperCamelCase )
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}" )

        UpperCAmelCase_ : Tuple = self._execution_device

        UpperCAmelCase_ : str = batch_size * num_images_per_prompt

        UpperCAmelCase_ : str = guidance_scale > 1.0
        UpperCAmelCase_ : str = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )

        # prior
        self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
        UpperCAmelCase_ : int = self.scheduler.timesteps

        UpperCAmelCase_ : int = self.prior.config.num_embeddings
        UpperCAmelCase_ : Any = self.prior.config.embedding_dim

        UpperCAmelCase_ : List[str] = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        UpperCAmelCase_ : List[Any] = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase )

        for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
            # expand the latents if we are doing classifier free guidance
            UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            UpperCAmelCase_ : Optional[Any] = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )

            UpperCAmelCase_ : int = self.prior(
                _UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding

            # remove the variance
            UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = noise_pred.split(
                scaled_model_input.shape[2] , dim=2 )  # batch_size, num_embeddings, embedding_dim

            # do_classifier_free_guidance is a bool, so guard on its truthiness rather than `is not None`
            if do_classifier_free_guidance:
                UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
                UpperCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            UpperCAmelCase_ : List[str] = self.scheduler.step(
                _UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=_UpperCamelCase )

        UpperCAmelCase_ : List[Any] = []
        for i, latent in enumerate(_UpperCamelCase ):
            UpperCAmelCase_ : List[str] = self.renderer.decode(
                latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )

            images.append(_UpperCamelCase )

        UpperCAmelCase_ : Optional[int] = torch.stack(_UpperCamelCase )

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )

        UpperCAmelCase_ : Dict = images.cpu().numpy()

        if output_type == "pil":
            UpperCAmelCase_ : List[str] = [self.numpy_to_pil(_UpperCamelCase ) for image in images]

        # Offload last model to CPU
        if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=_UpperCamelCase )
29
0
import string

# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
UpperCAmelCase_ = {
    'E': 12.70,
    'T': 9.06,
    'A': 8.17,
    'O': 7.51,
    'I': 6.97,
    'N': 6.75,
    'S': 6.33,
    'H': 6.09,
    'R': 5.99,
    'D': 4.25,
    'L': 4.03,
    'C': 2.78,
    'U': 2.76,
    'M': 2.41,
    'W': 2.36,
    'F': 2.23,
    'G': 2.02,
    'Y': 1.97,
    'P': 1.93,
    'B': 1.29,
    'V': 0.98,
    'K': 0.77,
    'J': 0.15,
    'X': 0.15,
    'Q': 0.10,
    'Z': 0.07,
}
UpperCAmelCase_ = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
UpperCAmelCase_ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def lowerCamelCase__ ( A__ : str ):
    '''simple docstring'''
    __lowerCamelCase = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def lowerCamelCase__ ( A__ : tuple ):
    '''simple docstring'''
    return x[0]


def lowerCamelCase__ ( A__ : str ):
    '''simple docstring'''
    __lowerCamelCase = get_letter_count(A__ )
    __lowerCamelCase = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(A__ )

    __lowerCamelCase = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=A__ )
        __lowerCamelCase = """""".join(freq_to_letter[freq] )

    __lowerCamelCase = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=A__ , reverse=A__ )

    __lowerCamelCase = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(A__ )


def lowerCamelCase__ ( A__ : str ):
    '''simple docstring'''
    __lowerCamelCase = get_frequency_order(A__ )
    __lowerCamelCase = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
12
import random
import unittest

import torch

from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
    '''simple docstring'''
    _snake_case : Union[str, Any] = IFImgaImgSuperResolutionPipeline
    _snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
    _snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
    _snake_case : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def __UpperCAmelCase ( self ) -> Optional[Any]:
        return self._get_superresolution_dummy_components()

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Any:
        if str(_UpperCamelCase ).startswith('mps' ):
            UpperCAmelCase_ : List[Any] = torch.manual_seed(_UpperCamelCase )
        else:
            UpperCAmelCase_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )

        UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
        UpperCAmelCase_ : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )

        UpperCAmelCase_ : Tuple = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }

        return inputs

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() ,
        reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def __UpperCAmelCase ( self ) -> Any:
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )

    def __UpperCAmelCase ( self ) -> Dict:
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def __UpperCAmelCase ( self ) -> str:
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    def __UpperCAmelCase ( self ) -> List[Any]:
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def __UpperCAmelCase ( self ) -> Union[str, Any]:
        self._test_save_load_local()

    def __UpperCAmelCase ( self ) -> Dict:
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
29
0
import unittest

import numpy as np

from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_torchaudio,
    slow,
)

from .test_pipelines_common import ANY


@is_pipeline_test
class __lowercase ( unittest.TestCase ):
    """simple docstring"""
    _UpperCAmelCase : Optional[int] = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    _UpperCAmelCase : int = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict):
        SCREAMING_SNAKE_CASE_: Any = AudioClassificationPipeline(model=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__)

        # test with a raw waveform
        SCREAMING_SNAKE_CASE_: Union[str, Any] = np.zeros((3_4000,))
        SCREAMING_SNAKE_CASE_: List[Any] = np.zeros((1_4000,))
        return audio_classifier, [audioa, audio]

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any]):
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = examples
        SCREAMING_SNAKE_CASE_: List[str] = audio_classifier(lowerCAmelCase__)

        # by default a model is initialized with num_labels=2
        self.assertEqual(
            lowerCAmelCase__ ,
            [
                {"score": ANY(lowerCAmelCase__), "label": ANY(lowerCAmelCase__)},
                {"score": ANY(lowerCAmelCase__), "label": ANY(lowerCAmelCase__)},
            ] , )
        SCREAMING_SNAKE_CASE_: Dict = audio_classifier(lowerCAmelCase__ , top_k=1)
        self.assertEqual(
            lowerCAmelCase__ ,
            [
                {"score": ANY(lowerCAmelCase__), "label": ANY(lowerCAmelCase__)},
            ] , )

        self.run_torchaudio(lowerCAmelCase__)

    @require_torchaudio
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Dict):
        import datasets

        # test with a local file
        SCREAMING_SNAKE_CASE_: Optional[Any] = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation")
        SCREAMING_SNAKE_CASE_: List[Any] = dataset[0]["audio"]["array"]
        SCREAMING_SNAKE_CASE_: Tuple = audio_classifier(lowerCAmelCase__)
        self.assertEqual(
            lowerCAmelCase__ ,
            [
                {"score": ANY(lowerCAmelCase__), "label": ANY(lowerCAmelCase__)},
                {"score": ANY(lowerCAmelCase__), "label": ANY(lowerCAmelCase__)},
            ] , )

    @require_torch
    def _SCREAMING_SNAKE_CASE ( self : int):
        SCREAMING_SNAKE_CASE_: List[str] = "anton-l/wav2vec2-random-tiny-classifier"

        SCREAMING_SNAKE_CASE_: Tuple = pipeline("audio-classification" , model=lowerCAmelCase__)

        SCREAMING_SNAKE_CASE_: str = np.ones((8000,))
        SCREAMING_SNAKE_CASE_: Optional[int] = audio_classifier(lowerCAmelCase__ , top_k=4)

        SCREAMING_SNAKE_CASE_: List[Any] = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        SCREAMING_SNAKE_CASE_: List[Any] = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(lowerCAmelCase__ , decimals=4) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        SCREAMING_SNAKE_CASE_: int = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        SCREAMING_SNAKE_CASE_: str = audio_classifier(lowerCAmelCase__ , top_k=4)
        self.assertIn(nested_simplify(lowerCAmelCase__ , decimals=4) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def _SCREAMING_SNAKE_CASE ( self : Any):
        import datasets

        SCREAMING_SNAKE_CASE_: Optional[Any] = "superb/wav2vec2-base-superb-ks"

        SCREAMING_SNAKE_CASE_: Union[str, Any] = pipeline("audio-classification" , model=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Any = datasets.load_dataset("anton-l/superb_dummy" , "ks" , split="test")

        SCREAMING_SNAKE_CASE_: List[Any] = np.array(dataset[3]["speech"] , dtype=np.floataa)
        SCREAMING_SNAKE_CASE_: str = audio_classifier(lowerCAmelCase__ , top_k=4)
        self.assertEqual(
            nested_simplify(lowerCAmelCase__ , decimals=3) ,
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ] , )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def _SCREAMING_SNAKE_CASE ( self : List[str]):
        pass
13
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__UpperCAmelCase = {
    'configuration_time_series_transformer': [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TimeSeriesTransformerConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimeSeriesTransformerForPrediction',
        'TimeSeriesTransformerModel',
        'TimeSeriesTransformerPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
29
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_lowerCamelCase : int = {
    """configuration_blip""": [
        """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """BlipConfig""",
        """BlipTextConfig""",
        """BlipVisionConfig""",
    ],
    """processing_blip""": ["""BlipProcessor"""],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : Tuple = ["""BlipImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : List[Any] = [
        """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """BlipModel""",
        """BlipPreTrainedModel""",
        """BlipForConditionalGeneration""",
        """BlipForQuestionAnswering""",
        """BlipVisionModel""",
        """BlipTextModel""",
        """BlipForImageTextRetrieval""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : Optional[Any] = [
        """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFBlipModel""",
        """TFBlipPreTrainedModel""",
        """TFBlipForConditionalGeneration""",
        """TFBlipForQuestionAnswering""",
        """TFBlipVisionModel""",
        """TFBlipTextModel""",
        """TFBlipForImageTextRetrieval""",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    _lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


__UpperCAmelCase = logging.get_logger(__name__)

__UpperCAmelCase = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.inta,
    'tensor(uint8)': np.uinta,
    'tensor(int16)': np.intaa,
    'tensor(uint16)': np.uintaa,
    'tensor(int32)': np.intaa,
    'tensor(uint32)': np.uintaa,
    'tensor(int64)': np.intaa,
    'tensor(uint64)': np.uintaa,
    'tensor(float16)': np.floataa,
    'tensor(float)': np.floataa,
    'tensor(double)': np.floataa,
}


class lowerCamelCase :
    '''simple docstring'''

    def __init__( self , _UpperCamelCase=None , **_UpperCamelCase ) -> Dict:
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
        UpperCAmelCase_ : Any = model
        UpperCAmelCase_ : int = kwargs.get('model_save_dir' , _UpperCamelCase )
        UpperCAmelCase_ : List[Any] = kwargs.get('latest_model_name' , _UpperCamelCase )

    def __call__( self , **_UpperCamelCase ) -> str:
        UpperCAmelCase_ : Optional[int] = {k: np.array(_UpperCamelCase ) for k, v in kwargs.items()}
        return self.model.run(_UpperCamelCase , _UpperCamelCase )

    @staticmethod
    def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ) -> List[Any]:
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
            UpperCAmelCase_ : List[str] = 'CPUExecutionProvider'

        return ort.InferenceSession(_UpperCamelCase , providers=[provider] , sess_options=_UpperCamelCase )

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> Dict:
        UpperCAmelCase_ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(self.latest_model_name )
        UpperCAmelCase_ : str = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
        try:
            shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(_UpperCamelCase )
        if src_path.exists():
            UpperCAmelCase_ : List[Any] = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
            try:
                shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
            except shutil.SameFileError:
                pass

    def __UpperCAmelCase ( self , _UpperCamelCase , **_UpperCamelCase , ) -> List[str]:
        if os.path.isfile(_UpperCamelCase ):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file" )
            return

        os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )

        # saving model weights/files
        self._save_pretrained(_UpperCamelCase , **_UpperCamelCase )

    @classmethod
    def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> List[str]:
        UpperCAmelCase_ : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        # load model from local directory
        if os.path.isdir(_UpperCamelCase ):
            UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(
                os.path.join(_UpperCamelCase , _UpperCamelCase ) , provider=_UpperCamelCase , sess_options=_UpperCamelCase )
            UpperCAmelCase_ : Tuple = Path(_UpperCamelCase )
        # load model from hub
        else:
            # download model
            UpperCAmelCase_ : List[str] = hf_hub_download(
                repo_id=_UpperCamelCase , filename=_UpperCamelCase , use_auth_token=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , )
            UpperCAmelCase_ : Union[str, Any] = Path(_UpperCamelCase ).parent
            UpperCAmelCase_ : List[str] = Path(_UpperCamelCase ).name
            UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(_UpperCamelCase , provider=_UpperCamelCase , sess_options=_UpperCamelCase )

        return cls(model=_UpperCamelCase , **_UpperCamelCase )

    @classmethod
    def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> Optional[int]:
        UpperCAmelCase_ : List[str] = None
        if len(str(_UpperCamelCase ).split('@' ) ) == 2:
            UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model_id.split('@' )

        return cls._from_pretrained(
            model_id=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , use_auth_token=_UpperCamelCase , **_UpperCamelCase , )
29
0
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

from pathlib import Path
import json
import tempfile

from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES


SCREAMING_SNAKE_CASE :Optional[Any] = 'tiny-wmt19-en-ru'

# Build

# borrowed from a test
SCREAMING_SNAKE_CASE :Union[str, Any] = [
    'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n',
    'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>',
    'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>',
]
SCREAMING_SNAKE_CASE :Optional[Any] = dict(zip(vocab, range(len(vocab))))
SCREAMING_SNAKE_CASE :Tuple = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']

with tempfile.TemporaryDirectory() as tmpdirname:
    SCREAMING_SNAKE_CASE :Optional[int] = Path(tmpdirname)
    SCREAMING_SNAKE_CASE :Optional[int] = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
    SCREAMING_SNAKE_CASE :Any = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
    SCREAMING_SNAKE_CASE :str = build_dir / VOCAB_FILES_NAMES['merges_file']
    with open(src_vocab_file, 'w') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, 'w') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, 'w') as fp:
        fp.write('\n'.join(merges))

    SCREAMING_SNAKE_CASE :Tuple = FSMTTokenizer(
        langs=['en', 'ru'],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

SCREAMING_SNAKE_CASE :List[str] = FSMTConfig(
    langs=['ru', 'en'],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

SCREAMING_SNAKE_CASE :Tuple = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')

# Test
SCREAMING_SNAKE_CASE :List[str] = tokenizer(['Making tiny model'], return_tensors='pt')
SCREAMING_SNAKE_CASE :str = tiny_model(**batch)

print('test output:', len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f'''Generated {mname_tiny}''')

# Upload
# transformers-cli upload tiny-wmt19-en-ru
15
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile

import pyarrow as pa
import pyarrow.parquet as pq
import pytest

import datasets
import datasets.config


@pytest.fixture(scope='session' )
def lowercase__ ( ):
    '''simple docstring'''
    UpperCAmelCase_ : Tuple = 10
    UpperCAmelCase_ : Tuple = datasets.Features(
        {
            'tokens': datasets.Sequence(datasets.Value('string' ) ),
            'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
            'answers': datasets.Sequence(
                {
                    'text': datasets.Value('string' ),
                    'answer_start': datasets.Value('int32' ),
                }
            ),
            'id': datasets.Value('int64' ),
        }
    )
    UpperCAmelCase_ : Tuple = datasets.Dataset.from_dict(
        {
            'tokens': [['foo'] * 5] * n,
            'labels': [[1] * 5] * n,
            'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
            'id': list(range(__snake_case ) ),
        } , features=__snake_case , )
    return dataset


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[str] ):
    '''simple docstring'''
    UpperCAmelCase_ : str = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
    dataset.map(cache_file_name=__snake_case )
    return filename


# FILE_CONTENT + files


__UpperCAmelCase = '\\n    Text data.\n    Second line of data.'


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt'
    UpperCAmelCase_ : Tuple = FILE_CONTENT
    with open(__snake_case , 'w' ) as f:
        f.write(__snake_case )
    return filename


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] ):
    '''simple docstring'''
    import bza

    UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
    UpperCAmelCase_ : str = bytes(__snake_case , 'utf-8' )
    with bza.open(__snake_case , 'wb' ) as f:
        f.write(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
    '''simple docstring'''
    import gzip

    UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
    UpperCAmelCase_ : Dict = bytes(__snake_case , 'utf-8' )
    with gzip.open(__snake_case , 'wb' ) as f:
        f.write(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
    '''simple docstring'''
    if datasets.config.LZ4_AVAILABLE:
        import lza.frame

        UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
        UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
        with lza.frame.open(__snake_case , 'wb' ) as f:
            f.write(__snake_case )
        return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : List[Any] ):
    '''simple docstring'''
    if datasets.config.PY7ZR_AVAILABLE:
        import pyazr

        UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with pyazr.SevenZipFile(__snake_case , 'w' ) as archive:
            archive.write(__snake_case , arcname=os.path.basename(__snake_case ) )
        return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any] ):
    '''simple docstring'''
    import tarfile

    UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
    with tarfile.TarFile(__snake_case , 'w' ) as f:
        f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
    '''simple docstring'''
    import lzma

    UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
    UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
    with lzma.open(__snake_case , 'wb' ) as f:
        f.write(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ):
    '''simple docstring'''
    import zipfile

    UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
    '''simple docstring'''
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
        UpperCAmelCase_ : List[str] = bytes(__snake_case , 'utf-8' )
        with zstd.open(__snake_case , 'wb' ) as f:
            f.write(__snake_case )
        return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
    '''simple docstring'''
    UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
    UpperCAmelCase_ : List[Any] = textwrap.dedent(
        '\\n    <?xml version="1.0" encoding="UTF-8" ?>\n    <tmx version="1.4">\n    <header segtype="sentence" srclang="ca" />\n    <body>\n    <tu>\n    <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n    <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n    </tu>\n    <tu>\n    <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n    <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n    </tu>\n    <tu>\n    <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n    <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n    </tu>\n    <tu>\n    <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n    <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n    </tu>\n    <tu>\n    <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n    <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n    </tu>\n    </body>\n    </tmx>' )
    with open(__snake_case , 'w' ) as f:
        f.write(__snake_case )
    return filename


__UpperCAmelCase = [
    {'col_1': '0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': '1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': '2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__UpperCAmelCase = [
    {'col_1': '4', 'col_2': 4, 'col_3': 4.0},
    {'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__UpperCAmelCase = {
    'col_1': ['0', '1', '2', '3'],
    'col_2': [0, 1, 2, 3],
    'col_3': [0.0, 1.0, 2.0, 3.0],
}
__UpperCAmelCase = [
    {'col_3': 0.0, 'col_1': '0', 'col_2': 0},
    {'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__UpperCAmelCase = [
    {'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]


@pytest.fixture(scope='session' )
def lowercase__ ( ):
    '''simple docstring'''
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
    '''simple docstring'''
    UpperCAmelCase_ : List[Any] = datasets.Dataset.from_dict(__snake_case )
    UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
    dataset.map(cache_file_name=__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlitea.connect(__snake_case ) ) as con:
        UpperCAmelCase_ : List[Any] = con.cursor()
        cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
        for item in DATA:
            cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
        con.commit()
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
    with open(__snake_case , 'w' , newline='' ) as f:
        UpperCAmelCase_ : Tuple = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
    with open(__snake_case , 'w' , newline='' ) as f:
        UpperCAmelCase_ : Optional[Any] = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any ):
    '''simple docstring'''
    import bza

    UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
    with open(__snake_case , 'rb' ) as f:
        UpperCAmelCase_ : int = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bza.open(__snake_case , 'wb' ) as f:
        f.write(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
        f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple ):
    '''simple docstring'''
    UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
        f.write(__snake_case , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : int , __snake_case : str ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
        f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
    UpperCAmelCase_ : Dict = pa.schema(
        {
            'col_1': pa.string(),
            'col_2': pa.intaa(),
            'col_3': pa.floataa(),
        }
    )
    with open(__snake_case , 'wb' ) as f:
        UpperCAmelCase_ : List[Any] = pq.ParquetWriter(__snake_case , schema=__snake_case )
        UpperCAmelCase_ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__snake_case ) )] for k in DATA[0]} , schema=__snake_case )
        writer.write_table(__snake_case )
        writer.close()
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
    UpperCAmelCase_ : Optional[int] = {'data': DATA}
    with open(__snake_case , 'w' ) as f:
        json.dump(__snake_case , __snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
    UpperCAmelCase_ : Tuple = {'data': DATA_DICT_OF_LISTS}
    with open(__snake_case , 'w' ) as f:
        json.dump(__snake_case , __snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
    with open(__snake_case , 'w' ) as f:
        for item in DATA:
            f.write(json.dumps(__snake_case ) + '\n' )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
    '''simple docstring'''
    UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
    with open(__snake_case , 'w' ) as f:
        for item in DATA:
            f.write(json.dumps(__snake_case ) + '\n' )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
    '''simple docstring'''
    UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
    with open(__snake_case , 'w' ) as f:
        for item in DATA_312:
            f.write(json.dumps(__snake_case ) + '\n' )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
    with open(__snake_case , 'w' ) as f:
        for item in DATA_STR:
            f.write(json.dumps(__snake_case ) + '\n' )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Dict ):
    '''simple docstring'''
    import gzip

    UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
    with open(__snake_case , 'rb' ) as orig_file:
        with gzip.open(__snake_case , 'wb' ) as zipped_file:
            zipped_file.writelines(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int , __snake_case : Any ):
    '''simple docstring'''
    import gzip

    UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
    with open(__snake_case , 'rb' ) as orig_file:
        with gzip.open(__snake_case , 'wb' ) as zipped_file:
            zipped_file.writelines(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ):
    '''simple docstring'''
    UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
        f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
        f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
    with tarfile.TarFile(__snake_case , 'w' ) as f:
        f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
        f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
    with tarfile.TarFile(__snake_case , 'w' ) as f:
        f.add(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Any = ['0', '1', '2', '3']
    UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
    with open(__snake_case , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3']
    UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
    with open(__snake_case , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple ):
    '''simple docstring'''
    UpperCAmelCase_ : Dict = ['0', '1', '2', '3']
    UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
    with open(__snake_case , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
        f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : str , __snake_case : Any ):
    '''simple docstring'''
    UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
        f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] ):
    '''simple docstring'''
    UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.basename('unsupported.ext' ) )
        f.write(__snake_case , arcname=os.path.basename('unsupported_2.ext' ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
    '''simple docstring'''
    UpperCAmelCase_ : Tuple = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
    UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
    with open(__snake_case , 'w' , encoding='utf-8' ) as f:
        f.write(__snake_case )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( ):
    '''simple docstring'''
    return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )


@pytest.fixture(scope='session' )
def lowercase__ ( ):
    '''simple docstring'''
    return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : List[str] ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
    with zipfile.ZipFile(__snake_case , 'w' ) as f:
        f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
        f.write(__snake_case , arcname=os.path.basename(__snake_case ).replace('.jpg' , '2.jpg' ) )
    return path


@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data_dir' )

    (data_dir / "subdir").mkdir()
    with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
        f.write('foo\n' * 10 )
    with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
        f.write('bar\n' * 10 )
    # hidden file
    with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
        f.write('bar\n' * 10 )

    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
        f.write('foo\n' * 10 )
    with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
        f.write('bar\n' * 10 )

    return data_dir
29
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL lowerCAmelCase_ = logging.get_logger(__name__) def __UpperCAmelCase ( __lowerCamelCase ) -> List[List[ImageInput]]: if isinstance(__lowerCamelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__lowerCamelCase , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__lowerCamelCase ): return [[videos]] raise ValueError(f"""Could not make batched video from {videos}""" ) class __A ( A_ ): '''simple docstring''' lowerCAmelCase : str = ["pixel_values"] def __init__( self : List[Any] ,_snake_case : bool = True ,_snake_case : Dict[str, int] = None ,_snake_case : PILImageResampling = PILImageResampling.BILINEAR ,_snake_case : bool = True ,_snake_case : Dict[str, int] = None ,_snake_case : bool = True ,_snake_case : Union[int, float] = 1 / 255 ,_snake_case : bool = True ,_snake_case : bool = True ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,**_snake_case : List[str] ,) -> None: """simple docstring""" super().__init__(**_snake_case ) lowercase__ : int = size if size is not None else {'''shortest_edge''': 256} lowercase__ : Union[str, Any] = get_size_dict(_snake_case ,default_to_square=_snake_case ) lowercase__ : List[str] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} lowercase__ : Optional[Any] = get_size_dict(_snake_case ,param_name='''crop_size''' ) lowercase__ : List[Any] = do_resize lowercase__ : Optional[int] = size lowercase__ : Union[str, Any] = do_center_crop lowercase__ : int = crop_size lowercase__ : List[str] = resample lowercase__ : int = do_rescale lowercase__ : Tuple = rescale_factor lowercase__ : List[Any] = offset lowercase__ : Optional[int] = do_normalize lowercase__ : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowercase__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCAmelCase ( self : Optional[int] ,_snake_case : np.ndarray ,_snake_case : Dict[str, int] ,_snake_case : PILImageResampling = PILImageResampling.BILINEAR ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : List[str] ,) -> np.ndarray: """simple docstring""" lowercase__ : Optional[int] = get_size_dict(_snake_case ,default_to_square=_snake_case ) if "shortest_edge" in size: lowercase__ : Optional[int] = get_resize_output_image_size(_snake_case ,size['''shortest_edge'''] ,default_to_square=_snake_case ) elif "height" in size and "width" in size: lowercase__ : Optional[Any] = (size['''height'''], size['''width''']) else: raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}""" ) return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case ) def UpperCAmelCase ( self : List[Any] ,_snake_case : np.ndarray ,_snake_case : Dict[str, int] ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Union[str, Any] ,) -> np.ndarray: """simple docstring""" lowercase__ : Dict = get_size_dict(_snake_case ) if "height" not in size or "width" not in size: raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" ) return center_crop(_snake_case ,size=(size['''height'''], size['''width''']) ,data_format=_snake_case ,**_snake_case ) def UpperCAmelCase ( self : List[str] ,_snake_case : np.ndarray ,_snake_case : Union[int, float] ,_snake_case : bool = True ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Tuple ,) -> Any: """simple docstring""" lowercase__ : List[Any] = image.astype(np.floataa ) if offset: lowercase__ : List[str] = image - (scale / 2) return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case ) def UpperCAmelCase ( self : List[str] ,_snake_case : np.ndarray ,_snake_case : Union[float, List[float]] ,_snake_case : Union[float, List[float]] ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Tuple ,) -> np.ndarray: """simple docstring""" return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case ) def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : ImageInput ,_snake_case : bool = None ,_snake_case : Dict[str, int] = None ,_snake_case : PILImageResampling = None ,_snake_case : bool = None ,_snake_case : Dict[str, int] = None ,_snake_case : bool = None ,_snake_case : float = None ,_snake_case : bool = None ,_snake_case : bool = None ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[ChannelDimension] = ChannelDimension.FIRST ,) -> np.ndarray: """simple docstring""" if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''' ) # All transformations expect numpy arrays. 
lowercase__ : Dict = to_numpy_array(_snake_case ) if do_resize: lowercase__ : Union[str, Any] = self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case ) if do_center_crop: lowercase__ : Optional[Any] = self.center_crop(_snake_case ,size=_snake_case ) if do_rescale: lowercase__ : List[Any] = self.rescale(image=_snake_case ,scale=_snake_case ,offset=_snake_case ) if do_normalize: lowercase__ : List[Any] = self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case ) lowercase__ : List[str] = to_channel_dimension_format(_snake_case ,_snake_case ) return image def UpperCAmelCase ( self : List[Any] ,_snake_case : ImageInput ,_snake_case : bool = None ,_snake_case : Dict[str, int] = None ,_snake_case : PILImageResampling = None ,_snake_case : bool = None ,_snake_case : Dict[str, int] = None ,_snake_case : bool = None ,_snake_case : float = None ,_snake_case : bool = None ,_snake_case : bool = None ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[str, TensorType]] = None ,_snake_case : ChannelDimension = ChannelDimension.FIRST ,**_snake_case : Dict ,) -> PIL.Image.Image: """simple docstring""" lowercase__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize lowercase__ : int = resample if resample is not None else self.resample lowercase__ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase__ : str = do_rescale if do_rescale is not None else self.do_rescale lowercase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ : Optional[Any] = offset if offset is not None else self.offset lowercase__ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize lowercase__ : Any = image_mean if image_mean is not None else self.image_mean lowercase__ : List[str] = image_std if image_std is not None else self.image_std lowercase__ : Union[str, Any] = size if size is not None else self.size lowercase__ : Union[str, Any] = get_size_dict(_snake_case ,default_to_square=_snake_case ) lowercase__ : Tuple = crop_size if crop_size is not None else self.crop_size lowercase__ : Union[str, Any] = get_size_dict(_snake_case ,param_name='''crop_size''' ) if not valid_images(_snake_case ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) lowercase__ : Any = make_batched(_snake_case ) lowercase__ : Optional[int] = [ [ self._preprocess_image( image=_snake_case ,do_resize=_snake_case ,size=_snake_case ,resample=_snake_case ,do_center_crop=_snake_case ,crop_size=_snake_case ,do_rescale=_snake_case ,rescale_factor=_snake_case ,offset=_snake_case ,do_normalize=_snake_case ,image_mean=_snake_case ,image_std=_snake_case ,data_format=_snake_case ,) for img in video ] for video in videos ] lowercase__ : Dict = {'''pixel_values''': videos} return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
16
from __future__ import annotations


def lowercase__ ( __snake_case : tuple[int, int] , __snake_case : int ):
    '''simple docstring'''
    UpperCAmelCase_ , UpperCAmelCase_ : Tuple = position
    UpperCAmelCase_ : str = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    UpperCAmelCase_ : Optional[Any] = []
    for position in positions:
        UpperCAmelCase_ , UpperCAmelCase_ : Tuple = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(__snake_case )
    return permissible_positions


def lowercase__ ( __snake_case : list[list[int]] ):
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row )


def lowercase__ ( __snake_case : list[list[int]] , __snake_case : tuple[int, int] , __snake_case : int ):
    '''simple docstring'''
    if is_complete(__snake_case ):
        return True
    for position in get_valid_pos(__snake_case , len(__snake_case ) ):
        UpperCAmelCase_ , UpperCAmelCase_ : Any = position
        if board[y][x] == 0:
            UpperCAmelCase_ : Optional[Any] = curr + 1
            if open_knight_tour_helper(__snake_case , __snake_case , curr + 1 ):
                return True
            UpperCAmelCase_ : List[Any] = 0
    return False


def lowercase__ ( __snake_case : int ):
    '''simple docstring'''
    UpperCAmelCase_ : str = [[0 for i in range(__snake_case )] for j in range(__snake_case )]
    for i in range(__snake_case ):
        for j in range(__snake_case ):
            UpperCAmelCase_ : Optional[Any] = 1
            if open_knight_tour_helper(__snake_case , (i, j) , 1 ):
                return board
            UpperCAmelCase_ : List[Any] = 0
    UpperCAmelCase_ : List[str] = F"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(__snake_case )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
29
0
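A self-contained sketch of the knight-move generation used by the tour solver above (upstream it is named `get_valid_pos`; in the sample every function is obfuscated to `lowercase__`), shown for a 5x5 board:

def knight_moves(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    # All eight L-shaped offsets, filtered to those that stay on the n x n board.
    y, x = position
    offsets = [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)]
    return [(y + dy, x + dx) for dy, dx in offsets if 0 <= y + dy < n and 0 <= x + dx < n]


print(knight_moves((0, 0), 5))  # [(1, 2), (2, 1)] - a corner knight has only two moves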
"""simple docstring""" _a = 8.3_144_598 def _A ( UpperCamelCase_ : float, UpperCamelCase_ : float) -> float: '''simple docstring''' if temperature < 0: raise Exception("Temperature cannot be less than 0 K") if molar_mass <= 0: raise Exception("Molar mass cannot be less than or equal to 0 kg/mol") else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example _a = 3_00 _a = 28 _a = rms_speed_of_molecule(temperature, molar_mass) print(F"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
17
def lowercase__ ( __snake_case : int ):
    '''simple docstring'''
    UpperCAmelCase_ : list[list[int]] = [[0 for _ in range(__snake_case )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        UpperCAmelCase_ : Optional[Any] = 1
    for n in range(m + 1 ):
        for k in range(1 , __snake_case ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            __UpperCAmelCase = int(input('Enter a number: ').strip())
            print(partition(n))
        except ValueError:
            print('Please enter a number.')
    else:
        try:
            __UpperCAmelCase = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('Please pass a number.')
29
0
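A worked example of the formula implemented above, v_rms = sqrt(3RT / M). Note that the demo at the bottom of that sample passes molar_mass = 28; physically the molar mass must be in kg/mol (0.028 for N2), which yields the textbook value of roughly 517 m/s for nitrogen at 300 K:

import math

R = 8.3144598  # universal gas constant, J/(mol*K)
T = 300        # temperature in kelvin
M = 0.028      # molar mass of N2 in kg/mol
print(f"{math.sqrt(3 * R * T / M):.0f} m/s")  # ~517 m/s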
import unittest

import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps

from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class a__ ( A__ , unittest.TestCase ):
    A = KandinskyVaaPriorPipeline
    A = ['prompt']
    A = ['prompt', 'negative_prompt']
    A = [
        'num_images_per_prompt',
        'generator',
        'num_inference_steps',
        'latents',
        'negative_prompt',
        'guidance_scale',
        'output_type',
        'return_dict',
    ]
    A = False

    @property
    def __UpperCamelCase ( self : int ):
        """simple docstring"""
        return 32

    @property
    def __UpperCamelCase ( self : int ):
        """simple docstring"""
        return 32

    @property
    def __UpperCamelCase ( self : List[Any] ):
        """simple docstring"""
        return self.time_input_dim

    @property
    def __UpperCamelCase ( self : Dict ):
        """simple docstring"""
        return self.time_input_dim * 4

    @property
    def __UpperCamelCase ( self : List[Any] ):
        """simple docstring"""
        return 100

    @property
    def __UpperCamelCase ( self : int ):
        """simple docstring"""
        SCREAMING_SNAKE_CASE_ : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        return tokenizer

    @property
    def __UpperCamelCase ( self : List[str] ):
        """simple docstring"""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ : str = CLIPTextConfig(
            bos_token_id=0,eos_token_id=2,hidden_size=self.text_embedder_hidden_size,projection_dim=self.text_embedder_hidden_size,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,)
        return CLIPTextModelWithProjection(_A )

    @property
    def __UpperCamelCase ( self : Optional[Any] ):
        """simple docstring"""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ : Tuple = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        SCREAMING_SNAKE_CASE_ : List[Any] = PriorTransformer(**_A )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        SCREAMING_SNAKE_CASE_ : Dict = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model

    @property
    def __UpperCamelCase ( self : Any ):
        """simple docstring"""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ : Tuple = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,image_size=224,projection_dim=self.text_embedder_hidden_size,intermediate_size=37,num_attention_heads=4,num_channels=3,num_hidden_layers=5,patch_size=14,)
        SCREAMING_SNAKE_CASE_ : Optional[Any] = CLIPVisionModelWithProjection(_A )
        return model

    @property
    def __UpperCamelCase ( self : Optional[int] ):
        """simple docstring"""
        SCREAMING_SNAKE_CASE_ : List[Any] = CLIPImageProcessor(
            crop_size=224,do_center_crop=_A,do_normalize=_A,do_resize=_A,image_mean=[0.48145466, 0.4578275, 0.40821073],image_std=[0.26862954, 0.26130258, 0.27577711],resample=3,size=224,)
        return image_processor

    def __UpperCamelCase ( self : Dict ):
        """simple docstring"""
        SCREAMING_SNAKE_CASE_ : Dict = self.dummy_prior
        SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_image_encoder
        SCREAMING_SNAKE_CASE_ : Any = self.dummy_text_encoder
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_tokenizer
        SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_image_processor
        SCREAMING_SNAKE_CASE_ : Any = UnCLIPScheduler(
            variance_type="fixed_small_log",prediction_type="sample",num_train_timesteps=1000,clip_sample=_A,clip_sample_range=10.0,)
        SCREAMING_SNAKE_CASE_ : str = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def __UpperCamelCase ( self : Tuple,_A : int,_A : Dict=0 ):
        """simple docstring"""
        if str(_A ).startswith("mps" ):
            SCREAMING_SNAKE_CASE_ : Tuple = torch.manual_seed(_A )
        else:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Generator(device=_A ).manual_seed(_A )
        SCREAMING_SNAKE_CASE_ : str = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def __UpperCamelCase ( self : Optional[int] ):
        """simple docstring"""
        SCREAMING_SNAKE_CASE_ : Dict = "cpu"
        SCREAMING_SNAKE_CASE_ : List[Any] = self.get_dummy_components()
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.pipeline_class(**_A )
        SCREAMING_SNAKE_CASE_ : List[str] = pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        SCREAMING_SNAKE_CASE_ : List[str] = pipe(**self.get_dummy_inputs(_A ) )
        SCREAMING_SNAKE_CASE_ : int = output.image_embeds
        SCREAMING_SNAKE_CASE_ : Any = pipe(
            **self.get_dummy_inputs(_A ),return_dict=_A,)[0]
        SCREAMING_SNAKE_CASE_ : Any = image[0, -10:]
        SCREAMING_SNAKE_CASE_ : List[Any] = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        SCREAMING_SNAKE_CASE_ : Optional[int] = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    @skip_mps
    def __UpperCamelCase ( self : Any ):
        """simple docstring"""
        SCREAMING_SNAKE_CASE_ : Optional[Any] = torch_device == "cpu"
        SCREAMING_SNAKE_CASE_ : str = True
        SCREAMING_SNAKE_CASE_ : List[str] = False
        self._test_inference_batch_single_identical(
            test_max_difference=_A,relax_max_difference=_A,test_mean_pixel_difference=_A,)

    @skip_mps
    def __UpperCamelCase ( self : str ):
        """simple docstring"""
        SCREAMING_SNAKE_CASE_ : List[str] = torch_device == "cpu"
        SCREAMING_SNAKE_CASE_ : Dict = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=_A,test_mean_pixel_difference=_A,)
18
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

__UpperCAmelCase = logging.get_logger(__name__)


@add_end_docstrings(_snake_case )
class lowerCamelCase (_snake_case ):
    '''simple docstring'''

    def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
        super().__init__(*_UpperCamelCase , **_UpperCamelCase )
        self.check_model_type(_UpperCamelCase )

    def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> List[Any]:
        UpperCAmelCase_ , UpperCAmelCase_ : Tuple = {}, {}
        if padding is not None:
            UpperCAmelCase_ : List[str] = padding
        if truncation is not None:
            UpperCAmelCase_ : Tuple = truncation
        if top_k is not None:
            UpperCAmelCase_ : Dict = top_k
        return preprocess_params, {}, postprocess_params

    def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> int:
        if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
            UpperCAmelCase_ : Optional[Any] = {'image': image, 'question': question}
        else:
            UpperCAmelCase_ : List[str] = image
        UpperCAmelCase_ : Optional[Any] = super().__call__(_UpperCamelCase , **_UpperCamelCase )
        return results

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ) -> Optional[Any]:
        UpperCAmelCase_ : List[Any] = load_image(inputs['image'] )
        UpperCAmelCase_ : Dict = self.tokenizer(
            inputs['question'] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase )
        UpperCAmelCase_ : int = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework )
        model_inputs.update(_UpperCamelCase )
        return model_inputs

    def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
        UpperCAmelCase_ : Any = self.model(**_UpperCamelCase )
        return model_outputs

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> str:
        if top_k > self.model.config.num_labels:
            UpperCAmelCase_ : Union[str, Any] = self.model.config.num_labels
        if self.framework == "pt":
            UpperCAmelCase_ : List[str] = model_outputs.logits.sigmoid()[0]
            UpperCAmelCase_ , UpperCAmelCase_ : str = probs.topk(_UpperCamelCase )
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )
        UpperCAmelCase_ : Optional[Any] = scores.tolist()
        UpperCAmelCase_ : Tuple = ids.tolist()
        return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
29
0
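A hedged usage sketch for the visual-question-answering pipeline defined above, through the public `transformers.pipeline` entry point (this downloads a default checkpoint on first run; the image path is a placeholder):

from transformers import pipeline

vqa = pipeline("visual-question-answering")
answers = vqa(image="path/to/image.jpg", question="How many cats are there?", top_k=2)
print(answers)  # a list of {'score': ..., 'answer': ...} dicts, as built in the postprocess step above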
import math


def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 ):
    lowerCamelCase_ = end or len(lowerCamelCase__ )
    for i in range(lowerCamelCase__ , lowerCamelCase__ ):
        lowerCamelCase_ = i
        lowerCamelCase_ = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            lowerCamelCase_ = array[temp_index - 1]
            temp_index -= 1
        lowerCamelCase_ = temp_index_value
    return array


def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):  # Max Heap
    lowerCamelCase_ = index
    lowerCamelCase_ = 2 * index + 1  # Left Node
    lowerCamelCase_ = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        lowerCamelCase_ = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        lowerCamelCase_ = right_index
    if largest != index:
        lowerCamelCase_ , lowerCamelCase_ = array[largest], array[index]
        heapify(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )


def lowerCamelCase_ ( lowerCamelCase__ ):
    lowerCamelCase_ = len(lowerCamelCase__ )
    for i in range(n // 2 , -1 , -1 ):
        heapify(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    for i in range(n - 1 , 0 , -1 ):
        lowerCamelCase_ , lowerCamelCase_ = array[0], array[i]
        heapify(lowerCamelCase__ , 0 , lowerCamelCase__ )
    return array


def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    lowerCamelCase_ = low
    lowerCamelCase_ = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        lowerCamelCase_ , lowerCamelCase_ = array[j], array[i]
        i += 1


def lowerCamelCase_ ( lowerCamelCase__ ):
    if len(lowerCamelCase__ ) == 0:
        return array
    lowerCamelCase_ = 2 * math.ceil(math.loga(len(lowerCamelCase__ ) ) )
    lowerCamelCase_ = 1_6
    return intro_sort(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ , lowerCamelCase__ )


def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(lowerCamelCase__ )
        max_depth -= 1
        lowerCamelCase_ = median_of_a(lowerCamelCase__ , lowerCamelCase__ , start + ((end - start) // 2) + 1 , end - 1 )
        lowerCamelCase_ = partition(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
        intro_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
        lowerCamelCase_ = p
    return insertion_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    __A = input('''Enter numbers separated by a comma : ''').strip()
    __A = [float(item) for item in user_input.split(''',''')]
    print(sort(unsorted))
19
import os

# Precomputes a list of the 100 first triangular numbers
__UpperCAmelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def lowercase__ ( ):
    '''simple docstring'''
    UpperCAmelCase_ : Any = os.path.dirname(os.path.realpath(__snake_case ) )
    UpperCAmelCase_ : Optional[Any] = os.path.join(__snake_case , 'words.txt' )
    UpperCAmelCase_ : Union[str, Any] = ''
    with open(__snake_case ) as f:
        UpperCAmelCase_ : List[Any] = f.readline()
    UpperCAmelCase_ : Optional[int] = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
    UpperCAmelCase_ : Optional[int] = [
        word
        for word in [sum(ord(__snake_case ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(__snake_case )


if __name__ == "__main__":
    print(solution())
29
0
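A worked example of the word-value test implemented above: a word's value is the sum of its letters' alphabet positions (ord(ch) - 64 for uppercase ASCII), and the word counts as triangular when that value equals some t_n = n(n + 1) / 2:

TRIANGULAR_NUMBERS = [n * (n + 1) // 2 for n in range(1, 101)]

word = "SKY"
value = sum(ord(ch) - 64 for ch in word)  # S=19, K=11, Y=25 -> 55
print(value, value in TRIANGULAR_NUMBERS)  # 55 True  (55 = t_10 = 10 * 11 / 2)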