Dataset schema (columns, one record per row):

    code                     string   length 86 to 54.5k
    code_codestyle           int64    0 to 371
    style_context            string   length 87 to 49.2k
    style_context_codestyle  int64    0 to 349
    label                    int64    0 or 1
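For orientation, a minimal sketch of how a dataset with this schema could be loaded and inspected with the Hugging Face `datasets` library. The repository id below is a placeholder assumption, not the dataset's real name.

from datasets import load_dataset

# hypothetical repository id; substitute the real one
ds = load_dataset("user/code-style-pairs", split="train")
print(ds.features)  # code, code_codestyle, style_context, style_context_codestyle, label
row = ds[0]
print(row["label"], row["code_codestyle"], row["style_context_codestyle"])
print(row["code"][:200])  # first 200 characters of the obfuscated code sample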
"""simple docstring""" from __future__ import annotations import typing from collections.abc import Iterable import numpy as np _lowerCAmelCase : str = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 _lowerCAmelCase : Tuple = typing.Union[np.floataa, int, float] # noqa: UP007 def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> VectorOut: '''simple docstring''' return np.sqrt(np.sum((np.asarray(_lowerCamelCase ) - np.asarray(_lowerCamelCase )) ** 2 ) ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> VectorOut: '''simple docstring''' return sum((va - va) ** 2 for va, va in zip(_lowerCamelCase , _lowerCamelCase ) ) ** (1 / 2) if __name__ == "__main__": def lowerCamelCase_( ) -> None: '''simple docstring''' from timeit import timeit print("Without Numpy" ) print( timeit( "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=10000 , globals=globals() , ) ) print("With Numpy" ) print( timeit( "euclidean_distance([1, 2, 3], [4, 5, 6])" , number=10000 , globals=globals() , ) ) benchmark()
code_codestyle: 340
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class A_ ( _a ): lowerCAmelCase__ = 42 lowerCAmelCase__ = None def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=0.9_9_9 , _lowerCamelCase="cosine" , ) -> List[str]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(_lowerCamelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_lowerCamelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) _lowerCamelCase : str = [] for i in range(_lowerCamelCase ): _lowerCamelCase : Any = i / num_diffusion_timesteps _lowerCamelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) ) return torch.tensor(_lowerCamelCase , dtype=torch.floataa ) class A_ ( _a , _a ): @register_to_config def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,): '''simple docstring''' if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) _lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = 1.0 - self.betas _lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 ) _lowerCamelCase : int = torch.tensor(1.0 ) # standard deviation of the initial noise distribution _lowerCamelCase : Tuple = 1.0 # setable values _lowerCamelCase : List[Any] = None _lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() ) _lowerCamelCase : List[str] = variance_type def _lowercase ( self: Any ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ): '''simple docstring''' return sample def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, torch.device] = None ): '''simple docstring''' _lowerCamelCase : str = num_inference_steps _lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) _lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) _lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: str=None ): '''simple docstring''' if prev_timestep is None: _lowerCamelCase : List[str] = t - 1 _lowerCamelCase : Optional[int] = self.alphas_cumprod[t] _lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : List[Any] = self.betas[t] else: _lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from 
https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample _lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: _lowerCamelCase : List[str] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": _lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) ) _lowerCamelCase : str = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler _lowerCamelCase : str = variance.log() _lowerCamelCase : str = beta.log() _lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2 _lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: bool = True ,): '''simple docstring''' _lowerCamelCase : str = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": _lowerCamelCase, _lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 ) else: _lowerCamelCase : List[Any] = None # 1. compute alphas, betas if prev_timestep is None: _lowerCamelCase : List[Any] = t - 1 _lowerCamelCase : Dict = self.alphas_cumprod[t] _lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : List[str] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : Any = self.betas[t] _lowerCamelCase : str = self.alphas[t] else: _lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev _lowerCamelCase : Optional[Any] = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": _lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": _lowerCamelCase : List[Any] = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: _lowerCamelCase : Any = torch.clamp( __lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t _lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _lowerCamelCase : Union[str, Any] = 0 if t > 0: _lowerCamelCase : Dict = randn_tensor( model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device ) _lowerCamelCase : Any = self._get_variance( __lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,) if self.variance_type == "fixed_small_log": _lowerCamelCase : Optional[Any] = variance elif self.variance_type == "learned_range": _lowerCamelCase : Optional[int] = (0.5 * variance).exp() else: raise ValueError( F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" " for the UnCLIPScheduler." ) _lowerCamelCase : Dict = variance * variance_noise _lowerCamelCase : List[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.IntTensor ,): '''simple docstring''' _lowerCamelCase : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype ) _lowerCamelCase : Any = timesteps.to(original_samples.device ) _lowerCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5 _lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : int = sqrt_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5 _lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
style_context_codestyle: 340
label: 1
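The `code` field of record 1 is an identifier-obfuscated Euclidean distance module; all `_lowerCamelCase` names originally referred to distinct variables. A short de-obfuscated sketch of what it computes (the restored names are assumptions, since the obfuscation is lossy):

import numpy as np

def euclidean_distance(v1, v2) -> float:
    # square root of the sum of squared coordinate differences
    return float(np.sqrt(np.sum((np.asarray(v1) - np.asarray(v2)) ** 2)))

print(euclidean_distance([1, 2, 3], [4, 5, 6]))  # 5.196..., i.e. sqrt(27)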
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) _lowerCAmelCase : Dict = { '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''', # See all ViT models at https://huggingface.co/models?filter=vit } class A_ ( _a ): lowerCAmelCase__ = 'vit' def __init__( self: Tuple ,__lowerCAmelCase: Tuple=768 ,__lowerCAmelCase: Union[str, Any]=12 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: Optional[int]=3_072 ,__lowerCAmelCase: List[Any]="gelu" ,__lowerCAmelCase: Any=0.0 ,__lowerCAmelCase: int=0.0 ,__lowerCAmelCase: Optional[Any]=0.02 ,__lowerCAmelCase: Union[str, Any]=1e-12 ,__lowerCAmelCase: Dict=224 ,__lowerCAmelCase: Any=16 ,__lowerCAmelCase: Optional[int]=3 ,__lowerCAmelCase: Union[str, Any]=True ,__lowerCAmelCase: Dict=16 ,**__lowerCAmelCase: List[str] ,): '''simple docstring''' super().__init__(**__lowerCAmelCase ) _lowerCamelCase : int = hidden_size _lowerCamelCase : int = num_hidden_layers _lowerCamelCase : Dict = num_attention_heads _lowerCamelCase : Tuple = intermediate_size _lowerCamelCase : str = hidden_act _lowerCamelCase : Optional[int] = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : Union[str, Any] = layer_norm_eps _lowerCamelCase : Optional[int] = image_size _lowerCamelCase : Optional[int] = patch_size _lowerCamelCase : int = num_channels _lowerCamelCase : str = qkv_bias _lowerCamelCase : str = encoder_stride class A_ ( _a ): lowerCAmelCase__ = version.parse('1.11' ) @property def _lowercase ( self: List[Any] ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _lowercase ( self: Optional[int] ): '''simple docstring''' return 1e-4
code_codestyle: 340
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Dict = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : str = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' for attribute in key.split("." ): _lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: _lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: _lowerCamelCase : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowerCamelCase : Tuple = value elif weight_type == "weight_g": _lowerCamelCase : List[str] = value elif weight_type == "weight_v": _lowerCamelCase : List[Any] = value elif weight_type == "bias": _lowerCamelCase : str = value elif weight_type == "running_mean": _lowerCamelCase : Optional[int] = value elif weight_type == "running_var": _lowerCamelCase : Optional[Any] = value elif weight_type == "num_batches_tracked": _lowerCamelCase : int = value elif weight_type == "inv_freq": _lowerCamelCase : List[str] = value else: _lowerCamelCase : Optional[Any] = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Dict = [] _lowerCamelCase : Optional[Any] = fairseq_model.state_dict() _lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): _lowerCamelCase : Dict = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , ) _lowerCamelCase : List[Any] = True else: for key, mapped_key in MAPPING.items(): _lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _lowerCamelCase : int = True if "*" in mapped_key: _lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2] _lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase ) if "pos_bias_u" in name: _lowerCamelCase : int = None elif "pos_bias_v" in name: _lowerCamelCase : Any = None elif "weight_g" in name: _lowerCamelCase : Any = "weight_g" elif "weight_v" in name: _lowerCamelCase : Any = "weight_v" elif "bias" in name: _lowerCamelCase : Optional[Any] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj _lowerCamelCase : Dict = "weight" elif "running_mean" in name: _lowerCamelCase : str = "running_mean" elif "inv_freq" in name: _lowerCamelCase : List[Any] = "inv_freq" elif "running_var" in name: _lowerCamelCase : Tuple = "running_var" elif "num_batches_tracked" in name: _lowerCamelCase : str = "num_batches_tracked" else: _lowerCamelCase : Dict = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : int = full_name.split("conv_layers." )[-1] _lowerCamelCase : List[Any] = name.split("." 
) _lowerCamelCase : Union[str, Any] = int(items[0] ) _lowerCamelCase : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowerCamelCase : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowerCamelCase : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) _lowerCamelCase : Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowerCamelCase : Optional[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCamelCase ) @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Dict: '''simple docstring''' if config_path is not None: _lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" ) else: _lowerCamelCase : Dict = WavaVecaConformerConfig() if "rope" in checkpoint_path: _lowerCamelCase : List[Any] = "rotary" if is_finetuned: if dict_path: _lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowerCamelCase : Optional[int] = target_dict.pad_index _lowerCamelCase : Dict = target_dict.bos_index _lowerCamelCase : Optional[Any] = target_dict.eos_index _lowerCamelCase : str = len(target_dict.symbols ) _lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" ) if not os.path.isdir(_lowerCamelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) _lowerCamelCase : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched _lowerCamelCase : List[str] = 0 _lowerCamelCase : List[Any] = 1 with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , ) _lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False _lowerCamelCase : 
Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) _lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) _lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase ) else: _lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase ) if is_finetuned: _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" ) _lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase ) _lowerCamelCase : Dict = model[0].eval() recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned ) hf_wavavec.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) _lowerCAmelCase : str = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
style_context_codestyle: 340
label: 1
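Record 2's `style_context` converts fairseq checkpoint keys to Transformers keys via a mapping whose values contain a `*` placeholder for the layer index. A simplified sketch of that renaming idea (the one-entry mapping and function name are illustrative, not the script's actual API):

# one illustrative entry from the much larger mapping above
MAPPING = {"self_attn.linear_k": "wav2vec2_conformer.encoder.layers.*.self_attn.linear_k"}

def rename(fairseq_key: str) -> str:
    for src, dst in MAPPING.items():
        if src in fairseq_key:
            # the layer index sits just before the matched fragment, e.g. "encoder.layers.7."
            layer_index = fairseq_key.split(src)[0].split(".")[-2]
            return dst.replace("*", layer_index)
    return fairseq_key

print(rename("encoder.layers.7.self_attn.linear_k"))
# wav2vec2_conformer.encoder.layers.7.self_attn.linear_k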
"""simple docstring""" import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _lowerCAmelCase : Any = logging.get_logger(__name__) _lowerCAmelCase : Any = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''} _lowerCAmelCase : Optional[int] = { '''vocab_file''': { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''', }, '''emoji_file''': { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''', }, } _lowerCAmelCase : Any = { '''abeja/gpt-neox-japanese-2.7b''': 2048, } def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Any: '''simple docstring''' with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f: _lowerCamelCase : Any = json.loads(f.read() ) _lowerCamelCase : str = collections.OrderedDict() _lowerCamelCase : List[Any] = collections.OrderedDict() _lowerCamelCase : List[Any] = collections.OrderedDict() with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f: _lowerCamelCase : Union[str, Any] = f.readlines() _lowerCamelCase : Dict = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token] for idx, b in enumerate(_lowerCamelCase ): _lowerCamelCase : int = b _lowerCamelCase : Dict = idx for wd in b: _lowerCamelCase : Any = idx return vocab, raw_vocab, ids_to_tokens, emoji class A_ ( _a ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = ['input_ids', 'attention_mask'] def __init__( self: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[str]="<|endoftext|>" ,__lowerCAmelCase: Any="<|endoftext|>" ,__lowerCAmelCase: Dict="<|startoftext|>" ,__lowerCAmelCase: List[str]="<|endoftext|>" ,__lowerCAmelCase: int=False ,**__lowerCAmelCase: Dict ,): '''simple docstring''' super().__init__( unk_token=__lowerCAmelCase ,pad_token=__lowerCAmelCase ,bos_token=__lowerCAmelCase ,eos_token=__lowerCAmelCase ,do_clean_text=__lowerCAmelCase ,**__lowerCAmelCase ,) if not os.path.isfile(__lowerCAmelCase ): raise ValueError( F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained""" " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) if not os.path.isfile(__lowerCAmelCase ): raise ValueError( F"""Can't find a emoji file at path '{emoji_file}'. 
To load the emoji information from a Google""" " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) _lowerCamelCase : Union[str, Any] = do_clean_text _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = load_vocab_and_emoji(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : List[Any] = SubWordJapaneseTokenizer( vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji ) @property def _lowercase ( self: Tuple ): '''simple docstring''' return len(self.raw_vocab ) def _lowercase ( self: List[str] ): '''simple docstring''' return dict(self.raw_vocab ,**self.added_tokens_encoder ) def _lowercase ( self: str ,__lowerCAmelCase: str ): '''simple docstring''' return self.subword_tokenizer.tokenize(__lowerCAmelCase ,clean=self.do_clean_text ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: List[str] ): '''simple docstring''' return self.vocab.get(__lowerCAmelCase ,self.vocab.get(self.unk_token ) ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: List[str] ): '''simple docstring''' return self.subword_tokenizer.convert_id_to_token(__lowerCAmelCase ) def _lowercase ( self: Dict ,__lowerCAmelCase: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Optional[int] = "".join(__lowerCAmelCase ).strip() return out_string def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: "Conversation" ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase ) + [self.eos_token_id] ) if len(__lowerCAmelCase ) > self.model_max_length: _lowerCamelCase : List[Any] = input_ids[-self.model_max_length :] return input_ids def _lowercase ( self: Tuple ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[str] = None ): '''simple docstring''' _lowerCamelCase : str = 0 if os.path.isdir(__lowerCAmelCase ): _lowerCamelCase : Dict = os.path.join( __lowerCAmelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) _lowerCamelCase : List[str] = os.path.join( __lowerCAmelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] ) else: _lowerCamelCase : str = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"] ) _lowerCamelCase : Optional[int] = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"] ) with open(__lowerCAmelCase ,"w" ,encoding="utf-8" ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" " Please check that the vocabulary is not corrupted!" 
) _lowerCamelCase : Optional[Any] = token_index writer.write(",".join(__lowerCAmelCase ) + "\n" ) index += 1 with open(__lowerCAmelCase ,"w" ,encoding="utf-8" ) as writer: json.dump(self.emoji ,__lowerCAmelCase ) return vocab_file, emoji_file class A_ ( _a ): def __init__( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : Any = vocab # same as swe _lowerCamelCase : Tuple = ids_to_tokens # same as bpe _lowerCamelCase : str = emoji _lowerCamelCase : Union[str, Any] = np.max([len(__lowerCAmelCase ) for w in self.vocab.keys()] ) _lowerCamelCase : Union[str, Any] = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" ) _lowerCamelCase : Optional[Any] = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" ) _lowerCamelCase : Any = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" ) _lowerCamelCase : int = re.compile( r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) _lowerCamelCase : Union[str, Any] = re.compile( r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) _lowerCamelCase : Tuple = re.compile( r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" ) _lowerCamelCase : List[Any] = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿" _lowerCamelCase : str = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟" _lowerCamelCase : Optional[Any] = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} ) def __len__( self: int ): '''simple docstring''' return len(self.ids_to_tokens ) def _lowercase ( self: Dict ,__lowerCAmelCase: int ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = self.content_repattera.sub("<URL>" ,__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = self.content_repattera.sub("<EMAIL>" ,__lowerCAmelCase ) _lowerCamelCase : int = self.content_repattera.sub("<TEL>" ,__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = self.content_repattera.sub("<DATE>" ,__lowerCAmelCase ) _lowerCamelCase : Tuple = self.content_repattera.sub("<DATE>" ,__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = self.content_repattera.sub("<PRICE>" ,__lowerCAmelCase ) _lowerCamelCase : int = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: _lowerCamelCase : int = content.replace("<BLOCK><BLOCK>" ,"<BLOCK>" ) return content def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[int]=False ): '''simple docstring''' _lowerCamelCase : List[str] = text.replace(" " ,"<SP>" ) _lowerCamelCase : Optional[int] = text.replace(" " ,"<SP>" ) _lowerCamelCase : Optional[Any] = text.replace("\r\n" ,"<BR>" ) _lowerCamelCase : Optional[int] = text.replace("\n" ,"<BR>" ) _lowerCamelCase : int = text.replace("\r" ,"<BR>" ) _lowerCamelCase : Tuple = text.replace("\t" ,"<TAB>" ) _lowerCamelCase : Dict = text.replace("—" ,"ー" ) _lowerCamelCase : List[Any] = text.replace("−" ,"ー" ) for k, v in self.emoji["emoji"].items(): if k in text: _lowerCamelCase : str = text.replace(__lowerCAmelCase ,__lowerCAmelCase ) if clean: 
_lowerCamelCase : Dict = self.clean_text(__lowerCAmelCase ) def check_simbol(__lowerCAmelCase: Optional[int] ): _lowerCamelCase : int = x.encode() if len(__lowerCAmelCase ) == 1 and len(__lowerCAmelCase ) == 2: _lowerCamelCase : str = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0xC2A1 and c <= 0xC2BF) or (c >= 0xC780 and c <= 0xC783) or (c >= 0xCAB9 and c <= 0xCBBF) or (c >= 0xCC80 and c <= 0xCDA2) ): return True return False def checkuae(__lowerCAmelCase: Union[str, Any] ): _lowerCamelCase : List[str] = x.encode() if len(__lowerCAmelCase ) == 1 and len(__lowerCAmelCase ) == 3: _lowerCamelCase : Tuple = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0xE2_8080 and c <= 0xE2_B07F: return True return False _lowerCamelCase : str = 0 _lowerCamelCase : Dict = [] while pos < len(__lowerCAmelCase ): _lowerCamelCase : Any = min(len(__lowerCAmelCase ) ,pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3 _lowerCamelCase : Dict = [] # (token_id, token, pos) for e in range(__lowerCAmelCase ,__lowerCAmelCase ,-1 ): _lowerCamelCase : Any = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(__lowerCAmelCase ) > 2: _lowerCamelCase : Any = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(__lowerCAmelCase ) > 0: # the smallest token_id is adopted _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = sorted(__lowerCAmelCase ,key=lambda __lowerCAmelCase : x[0] )[0] result.append(__lowerCAmelCase ) _lowerCamelCase : List[str] = e else: _lowerCamelCase : List[Any] = pos + 1 _lowerCamelCase : List[Any] = text[pos:end] if check_simbol(__lowerCAmelCase ): result.append("<KIGOU>" ) elif checkuae(__lowerCAmelCase ): result.append("<U2000U2BFF>" ) else: for i in wd.encode("utf-8" ): result.append("<|byte%d|>" % i ) _lowerCamelCase : Optional[Any] = end return result def _lowercase ( self: List[str] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: int="\n" ): '''simple docstring''' _lowerCamelCase : List[Any] = [] _lowerCamelCase : List[str] = [] _lowerCamelCase : List[str] = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(__lowerCAmelCase ) > 0: words.append(bytearray(__lowerCAmelCase ).decode("utf-8" ,errors="replace" ) ) _lowerCamelCase : Tuple = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["emoji_inv"][word] ) elif word == "<SP>": words.append(" " ) elif word == "<BR>": words.append(__lowerCAmelCase ) elif word == "<TAB>": words.append("\t" ) elif word == "<BLOCK>": words.append("▀" ) elif word == "<KIGOU>": words.append("ǀ" ) elif word == "<U2000U2BFF>": words.append("‖" ) else: words.append(__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: words.append(bytearray(__lowerCAmelCase ).decode("utf-8" ,errors="replace" ) ) _lowerCamelCase : Union[str, Any] = "".join(__lowerCAmelCase ) return text
code_codestyle: 340
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(a - b ) for a, b in zip(_lowerCamelCase , _lowerCamelCase ) ) ) def lowerCamelCase_( _lowerCamelCase ) -> None: '''simple docstring''' if point: if isinstance(_lowerCamelCase , _lowerCamelCase ): for item in point: if not isinstance(_lowerCamelCase , (int, float) ): _lowerCamelCase : Dict = ( "Expected a list of numbers as input, found " F"""{type(_lowerCamelCase ).__name__}""" ) raise TypeError(_lowerCamelCase ) else: _lowerCamelCase : Optional[int] = F"""Expected a list of numbers as input, found {type(_lowerCamelCase ).__name__}""" raise TypeError(_lowerCamelCase ) else: raise ValueError("Missing an input" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(x - y ) for x, y in zip(_lowerCamelCase , _lowerCamelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 340
label: 1
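Record 3's `style_context` is an obfuscated Manhattan (taxicab) distance module. A de-obfuscated sketch of its core function (the restored names are assumptions):

def manhattan_distance(point_a: list[float], point_b: list[float]) -> float:
    # sum of absolute coordinate differences
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))

print(manhattan_distance([1, 1], [2, 2]))  # 2.0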
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule _lowerCAmelCase : List[str] = {'''tokenization_byt5''': ['''ByT5Tokenizer''']} if TYPE_CHECKING: from .tokenization_byta import ByTaTokenizer else: import sys _lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
code_codestyle: 340
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Optional[Any] = np.inf def set_batch_size(_lowerCamelCase ) -> None: nonlocal batch_size if isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Optional[int] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ) and feature.dtype == "binary": _lowerCamelCase : List[str] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_lowerCamelCase , _lowerCamelCase ) return None if batch_size is np.inf else batch_size class A_ ( _a ): def __init__( self: Optional[int] ,__lowerCAmelCase: NestedDataStructureLike[PathLike] ,__lowerCAmelCase: Optional[NamedSplit] = None ,__lowerCAmelCase: Optional[Features] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: int ,): '''simple docstring''' super().__init__( __lowerCAmelCase ,split=__lowerCAmelCase ,features=__lowerCAmelCase ,cache_dir=__lowerCAmelCase ,keep_in_memory=__lowerCAmelCase ,streaming=__lowerCAmelCase ,num_proc=__lowerCAmelCase ,**__lowerCAmelCase ,) _lowerCamelCase : Tuple = path_or_paths if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else {self.split: path_or_paths} _lowerCamelCase : Any = _PACKAGED_DATASETS_MODULES["parquet"][1] _lowerCamelCase : int = Parquet( cache_dir=__lowerCAmelCase ,data_files=__lowerCAmelCase ,features=__lowerCAmelCase ,hash=__lowerCAmelCase ,**__lowerCAmelCase ,) def _lowercase ( self: Optional[int] ): '''simple docstring''' if self.streaming: _lowerCamelCase : List[Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _lowerCamelCase : Tuple = None _lowerCamelCase : Optional[int] = None _lowerCamelCase : List[str] = None _lowerCamelCase : str = None self.builder.download_and_prepare( download_config=__lowerCAmelCase ,download_mode=__lowerCAmelCase ,verification_mode=__lowerCAmelCase ,base_path=__lowerCAmelCase ,num_proc=self.num_proc ,) _lowerCamelCase : Any = self.builder.as_dataset( split=self.split ,verification_mode=__lowerCAmelCase ,in_memory=self.keep_in_memory ) return dataset class A_ : def __init__( self: str ,__lowerCAmelCase: Dataset ,__lowerCAmelCase: Union[PathLike, BinaryIO] ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: List[Any] ,): '''simple docstring''' _lowerCamelCase : Any = dataset _lowerCamelCase : Any = path_or_buf _lowerCamelCase : Any = batch_size or get_writer_batch_size(dataset.features ) _lowerCamelCase : List[str] = parquet_writer_kwargs def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf ,(str, 
bytes, os.PathLike) ): with open(self.path_or_buf ,"wb+" ) as buffer: _lowerCamelCase : str = self._write(file_obj=__lowerCAmelCase ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) else: _lowerCamelCase : Optional[int] = self._write(file_obj=self.path_or_buf ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) return written def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: BinaryIO ,__lowerCAmelCase: int ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : List[str] = 0 _lowerCamelCase : Optional[int] = parquet_writer_kwargs.pop("path_or_buf" ,__lowerCAmelCase ) _lowerCamelCase : List[str] = self.dataset.features.arrow_schema _lowerCamelCase : str = pq.ParquetWriter(__lowerCAmelCase ,schema=__lowerCAmelCase ,**__lowerCAmelCase ) for offset in logging.tqdm( range(0 ,len(self.dataset ) ,__lowerCAmelCase ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating parquet from Arrow format" ,): _lowerCamelCase : List[str] = query_table( table=self.dataset._data ,key=slice(__lowerCAmelCase ,offset + batch_size ) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,) writer.write_table(__lowerCAmelCase ) written += batch.nbytes writer.close() return written
style_context_codestyle: 340
label: 1
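Record 4's `style_context` implements the Parquet reader and writer used inside the `datasets` library. From user code these are normally reached through the public round-trip helpers; a minimal sketch (the file name is arbitrary):

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_parquet("tiny.parquet")                    # backed by the writer class shown above
restored = Dataset.from_parquet("tiny.parquet")  # backed by the reader class
print(restored[0])  # {'text': 'a', 'label': 0}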
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version _lowerCAmelCase : List[Any] = get_logger(__name__) class A_ : lowerCAmelCase__ = 'dummy_data' lowerCAmelCase__ = 'datasets' lowerCAmelCase__ = False def __init__( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[Version, str] ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[List[Callable]] = None ,): '''simple docstring''' _lowerCamelCase : str = 0 _lowerCamelCase : List[str] = dataset_name _lowerCamelCase : Optional[int] = cache_dir _lowerCamelCase : Optional[int] = use_local_dummy_data _lowerCamelCase : int = config # download_callbacks take a single url as input _lowerCamelCase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _lowerCamelCase : int = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _lowerCamelCase : Tuple = str(__lowerCAmelCase ) # to be downloaded _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Dict = None @property def _lowercase ( self: str ): '''simple docstring''' if self._dummy_file is None: _lowerCamelCase : List[str] = self.download_dummy_data() return self._dummy_file @property def _lowercase ( self: str ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" ,self.config.name ,self.version_name ) # structure is dummy / version_name return os.path.join("dummy" ,self.version_name ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' return os.path.join(self.dummy_data_folder ,"dummy_data.zip" ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Dict = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _lowerCamelCase : Optional[int] = cached_path( __lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=__lowerCAmelCase ,force_extract=__lowerCAmelCase ) return os.path.join(__lowerCAmelCase ,self.dummy_file_name ) @property def _lowercase ( self: Tuple ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file ) @property def _lowercase ( self: List[str] ): '''simple docstring''' if self._bucket_url is None: _lowerCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) ) return self._bucket_url @property def _lowercase ( self: Union[str, Any] ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _lowerCamelCase : Tuple = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _lowerCamelCase : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): return self.create_dummy_data_dict(__lowerCAmelCase ,__lowerCAmelCase ) elif isinstance(__lowerCAmelCase ,(list, tuple) ): return self.create_dummy_data_list(__lowerCAmelCase ,__lowerCAmelCase ) else: return self.create_dummy_data_single(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: Optional[int] ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' return path def _lowercase ( self: Optional[int] ): '''simple docstring''' return {} def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : str = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): for single_url in single_urls: download_callback(__lowerCAmelCase ) else: _lowerCamelCase : Union[str, Any] = single_urls download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : Dict = [os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) for x in single_urls] else: _lowerCamelCase : Union[str, Any] = single_urls _lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) _lowerCamelCase : List[Any] = value # make sure that values are unique if all(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Dict = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _lowerCamelCase : List[str] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,__lowerCAmelCase ) ) for url in data_url ) _lowerCamelCase : Optional[Any] = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _lowerCamelCase : Tuple = [data_url[0]] * len(__lowerCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the 
url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__lowerCAmelCase ) return dummy_data_list def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__lowerCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _lowercase ( self: Optional[Any] ): '''simple docstring''' pass def _lowercase ( self: Optional[int] ): '''simple docstring''' pass def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' def _iter_archive_members(__lowerCAmelCase: Any ): # this preserves the order of the members inside the ZIP archive _lowerCamelCase : Tuple = Path(self.dummy_file ).parent _lowerCamelCase : str = path.relative_to(__lowerCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _lowerCamelCase : Optional[int] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = Path(__lowerCAmelCase ) _lowerCamelCase : int = _iter_archive_members(__lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__lowerCAmelCase ).as_posix(), file_path.open("rb" ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : List[Any] = [paths] for path in paths: if os.path.isfile(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__lowerCAmelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
code_codestyle: 340
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Optional[int] = {} _lowerCamelCase : Optional[int] = tokenizer(example["content"] , truncation=_lowerCamelCase )["input_ids"] _lowerCamelCase : Dict = len(example["content"] ) / len(output["input_ids"] ) return output _lowerCAmelCase : Tuple = HfArgumentParser(PretokenizationArguments) _lowerCAmelCase : Optional[int] = parser.parse_args() if args.num_workers is None: _lowerCAmelCase : Any = multiprocessing.cpu_count() _lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir) _lowerCAmelCase : Union[str, Any] = time.time() _lowerCAmelCase : Optional[int] = load_dataset(args.dataset_name, split='''train''') print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') _lowerCAmelCase : Any = time.time() _lowerCAmelCase : Dict = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ '''repo_name''', '''path''', '''copies''', '''size''', '''content''', '''license''', '''hash''', '''line_mean''', '''line_max''', '''alpha_frac''', '''autogenerated''', ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') _lowerCAmelCase : str = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
style_context_codestyle: 340
label: 1
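Record 5's `style_context` pretokenizes a dataset in parallel with `datasets.map`. The same pattern in miniature (the gpt2 tokenizer and toy data are illustrative choices, not the script's):

from datasets import Dataset
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
ds = Dataset.from_dict({"content": ["def f():\n    return 1", "print('hi')"]})

def tokenize(example):
    # mirrors the script: tokenize the text column, keep only input_ids
    return {"input_ids": tok(example["content"], truncation=True)["input_ids"]}

ds = ds.map(tokenize, num_proc=1, remove_columns=["content"])
print(ds[0]["input_ids"][:8])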
"""simple docstring""" from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput _lowerCAmelCase : Optional[Any] = 8 def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=BITS ) -> Any: '''simple docstring''' _lowerCamelCase : Optional[Any] = x.device _lowerCamelCase : Optional[Any] = (x * 255).int().clamp(0 , 255 ) _lowerCamelCase : Dict = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_lowerCamelCase ) _lowerCamelCase : str = rearrange(_lowerCamelCase , "d -> d 1 1" ) _lowerCamelCase : str = rearrange(_lowerCamelCase , "b c h w -> b c 1 h w" ) _lowerCamelCase : Optional[int] = ((x & mask) != 0).float() _lowerCamelCase : Any = rearrange(_lowerCamelCase , "b c d h w -> b (c d) h w" ) _lowerCamelCase : Optional[Any] = bits * 2 - 1 return bits def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=BITS ) -> Dict: '''simple docstring''' _lowerCamelCase : int = x.device _lowerCamelCase : Union[str, Any] = (x > 0).int() _lowerCamelCase : Union[str, Any] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_lowerCamelCase , dtype=torch.intaa ) _lowerCamelCase : str = rearrange(_lowerCamelCase , "d -> d 1 1" ) _lowerCamelCase : int = rearrange(_lowerCamelCase , "b (c d) h w -> b c d h w" , d=8 ) _lowerCamelCase : Dict = reduce(x * mask , "b c d h w -> b c h w" , "sum" ) return (dec / 255).clamp(0.0 , 1.0 ) def lowerCamelCase_( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0.0 , _lowerCamelCase = True , _lowerCamelCase=None , _lowerCamelCase = True , ) -> Union[DDIMSchedulerOutput, Tuple]: '''simple docstring''' if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) _lowerCamelCase : Optional[int] = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas _lowerCamelCase : str = self.alphas_cumprod[timestep] _lowerCamelCase : Tuple = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod _lowerCamelCase : str = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _lowerCamelCase : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" _lowerCamelCase : Dict = self.bit_scale if self.config.clip_sample: _lowerCamelCase : List[str] = torch.clamp(_lowerCamelCase , -scale , _lowerCamelCase ) # 5. 
compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) _lowerCamelCase : Optional[Any] = self._get_variance(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Dict = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide _lowerCamelCase : Optional[int] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _lowerCamelCase : Union[str, Any] = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _lowerCamelCase : int = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 _lowerCamelCase : Dict = model_output.device if torch.is_tensor(_lowerCamelCase ) else "cpu" _lowerCamelCase : Optional[int] = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_lowerCamelCase ).to(_lowerCamelCase ) _lowerCamelCase : Optional[Any] = self._get_variance(_lowerCamelCase , _lowerCamelCase ) ** 0.5 * eta * noise _lowerCamelCase : Dict = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=_lowerCamelCase , pred_original_sample=_lowerCamelCase ) def lowerCamelCase_( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="epsilon" , _lowerCamelCase=None , _lowerCamelCase = True , ) -> Union[DDPMSchedulerOutput, Tuple]: '''simple docstring''' _lowerCamelCase : Union[str, Any] = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: _lowerCamelCase, _lowerCamelCase : List[str] = torch.split(_lowerCamelCase , sample.shape[1] , dim=1 ) else: _lowerCamelCase : Union[str, Any] = None # 1. compute alphas, betas _lowerCamelCase : Dict = self.alphas_cumprod[t] _lowerCamelCase : List[str] = self.alphas_cumprod[t - 1] if t > 0 else self.one _lowerCamelCase : Optional[int] = 1 - alpha_prod_t _lowerCamelCase : Any = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": _lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": _lowerCamelCase : List[str] = model_output else: raise ValueError(F"""Unsupported prediction_type {prediction_type}.""" ) # 3. Clip "predicted x_0" _lowerCamelCase : Union[str, Any] = self.bit_scale if self.config.clip_sample: _lowerCamelCase : int = torch.clamp(_lowerCamelCase , -scale , _lowerCamelCase ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : Any = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t _lowerCamelCase : Optional[int] = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : Any = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _lowerCamelCase : Union[str, Any] = 0 if t > 0: _lowerCamelCase : str = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_lowerCamelCase ).to(model_output.device ) _lowerCamelCase : Union[str, Any] = (self._get_variance(_lowerCamelCase , predicted_variance=_lowerCamelCase ) ** 0.5) * noise _lowerCamelCase : str = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=_lowerCamelCase , pred_original_sample=_lowerCamelCase ) class A_ ( _a ): def __init__( self: Any ,__lowerCAmelCase: UNetaDConditionModel ,__lowerCAmelCase: Union[DDIMScheduler, DDPMScheduler] ,__lowerCAmelCase: Optional[float] = 1.0 ,): '''simple docstring''' super().__init__() _lowerCamelCase : Any = bit_scale _lowerCamelCase : Optional[int] = ( ddim_bit_scheduler_step if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else ddpm_bit_scheduler_step ) self.register_modules(unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase ) @torch.no_grad() def __call__( self: List[Any] ,__lowerCAmelCase: Optional[int] = 256 ,__lowerCAmelCase: Optional[int] = 256 ,__lowerCAmelCase: Optional[int] = 50 ,__lowerCAmelCase: Optional[torch.Generator] = None ,__lowerCAmelCase: Optional[int] = 1 ,__lowerCAmelCase: Optional[str] = "pil" ,__lowerCAmelCase: bool = True ,**__lowerCAmelCase: Optional[Any] ,): '''simple docstring''' _lowerCamelCase : List[str] = torch.randn( (batch_size, self.unet.config.in_channels, height, width) ,generator=__lowerCAmelCase ,) _lowerCamelCase : Optional[int] = decimal_to_bits(__lowerCAmelCase ) * self.bit_scale _lowerCamelCase : str = latents.to(self.device ) self.scheduler.set_timesteps(__lowerCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual _lowerCamelCase : Optional[int] = self.unet(__lowerCAmelCase ,__lowerCAmelCase ).sample # compute the previous noisy sample x_t -> x_t-1 _lowerCamelCase : Any = self.scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample _lowerCamelCase : Union[str, Any] = bits_to_decimal(__lowerCAmelCase ) if output_type == "pil": _lowerCamelCase : Any = self.numpy_to_pil(__lowerCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCAmelCase )
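# --- Editor's note: a hedged, self-contained sketch (not part of the original
# pipeline) of the 8-bit codec idea implemented above. A float in [0, 1] is
# quantized to an integer in [0, 255], unpacked MSB-first into sign bits in
# {-1, 1}, then reassembled from the positive bits. All names below are
# illustrative.
if __name__ == "__main__":
    value = torch.tensor(0.7)
    quantized = (value * 255).int().clamp(0, 255)               # e.g. tensor(178)
    weights = 2 ** torch.arange(7, -1, -1)                      # 128, 64, ..., 1
    sign_bits = ((quantized & weights) != 0).float() * 2 - 1    # values in {-1, 1}
    decoded = ((sign_bits > 0).int() * weights).sum() / 255
    assert torch.isclose(decoded, quantized.float() / 255)      # round trip is exact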
340
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Optional[Any] = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
1
"""simple docstring""" from collections.abc import Sequence from queue import Queue class A_ : def __init__( self: Dict ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Dict=None ): '''simple docstring''' _lowerCamelCase : List[Any] = start _lowerCamelCase : Tuple = end _lowerCamelCase : str = val _lowerCamelCase : Optional[Any] = (start + end) // 2 _lowerCamelCase : Any = left _lowerCamelCase : str = right def __repr__( self: Tuple ): '''simple docstring''' return F"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})""" class A_ : def __init__( self: Any ,__lowerCAmelCase: Sequence ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : List[Any] = collection _lowerCamelCase : List[str] = function if self.collection: _lowerCamelCase : str = self._build_tree(0 ,len(__lowerCAmelCase ) - 1 ) def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: List[Any] ): '''simple docstring''' self._update_tree(self.root ,__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: int ,__lowerCAmelCase: int ,__lowerCAmelCase: str ): '''simple docstring''' return self._query_range(self.root ,__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: Dict ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[Any] ): '''simple docstring''' if start == end: return SegmentTreeNode(__lowerCAmelCase ,__lowerCAmelCase ,self.collection[start] ) _lowerCamelCase : Optional[int] = (start + end) // 2 _lowerCamelCase : Any = self._build_tree(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : List[Any] = self._build_tree(mid + 1 ,__lowerCAmelCase ) return SegmentTreeNode(__lowerCAmelCase ,__lowerCAmelCase ,self.fn(left.val ,right.val ) ,__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Tuple ): '''simple docstring''' if node.start == i and node.end == i: _lowerCamelCase : Any = val return if i <= node.mid: self._update_tree(node.left ,__lowerCAmelCase ,__lowerCAmelCase ) else: self._update_tree(node.right ,__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Dict = self.fn(node.left.val ,node.right.val ) def _lowercase ( self: str ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Tuple ): '''simple docstring''' if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left ,__lowerCAmelCase ,__lowerCAmelCase ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left ,__lowerCAmelCase ,node.mid ) ,self._query_range(node.right ,node.mid + 1 ,__lowerCAmelCase ) ,) else: # range in right child tree return self._query_range(node.right ,__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: Optional[int] ): '''simple docstring''' if self.root is not None: _lowerCamelCase : List[str] = Queue() queue.put(self.root ) while not queue.empty(): _lowerCamelCase : int = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print('''*''' * 50) _lowerCAmelCase : Any = SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 
print(arr.query_range(1, 3)) # 13 print()
340
"""simple docstring""" import logging from transformers.configuration_utils import PretrainedConfig _lowerCAmelCase : Optional[Any] = logging.getLogger(__name__) class A_ ( _a ): lowerCAmelCase__ = 'masked_bert' def __init__( self: Union[str, Any] ,__lowerCAmelCase: Dict=30_522 ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Dict=12 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: List[Any]="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Tuple=512 ,__lowerCAmelCase: str=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Union[str, Any]=1e-12 ,__lowerCAmelCase: Union[str, Any]=0 ,__lowerCAmelCase: List[Any]="topK" ,__lowerCAmelCase: Optional[Any]="constant" ,__lowerCAmelCase: Optional[Any]=0.0 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = hidden_size _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : Tuple = num_attention_heads _lowerCamelCase : Optional[Any] = hidden_act _lowerCamelCase : Optional[Any] = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCamelCase : str = max_position_embeddings _lowerCamelCase : List[str] = type_vocab_size _lowerCamelCase : Optional[int] = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps _lowerCamelCase : int = pruning_method _lowerCamelCase : str = mask_init _lowerCamelCase : List[Any] = mask_scale
340
1
"""simple docstring""" import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch _lowerCAmelCase : List[str] = logging.get_logger(__name__) class A_ : def __init__( self: List[Any] ,__lowerCAmelCase: str = None ,__lowerCAmelCase: uuid.UUID = None ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: Tuple=None ): '''simple docstring''' if not conversation_id: _lowerCamelCase : List[str] = uuid.uuida() if past_user_inputs is None: _lowerCamelCase : Dict = [] if generated_responses is None: _lowerCamelCase : Optional[Any] = [] _lowerCamelCase : uuid.UUID = conversation_id _lowerCamelCase : List[str] = past_user_inputs _lowerCamelCase : List[str] = generated_responses _lowerCamelCase : Optional[str] = text def __eq__( self: Optional[int] ,__lowerCAmelCase: List[str] ): '''simple docstring''' if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def _lowercase ( self: Dict ,__lowerCAmelCase: str ,__lowerCAmelCase: bool = False ): '''simple docstring''' if self.new_user_input: if overwrite: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ F"""with: \"{text}\".""" ) _lowerCamelCase : Tuple = text else: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" ) else: _lowerCamelCase : Optional[int] = text def _lowercase ( self: Tuple ): '''simple docstring''' if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) _lowerCamelCase : str = None def _lowercase ( self: List[str] ,__lowerCAmelCase: str ): '''simple docstring''' self.generated_responses.append(__lowerCAmelCase ) def _lowercase ( self: Any ): '''simple docstring''' for user_input, generated_response in zip(self.past_user_inputs ,self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self: List[str] ): '''simple docstring''' _lowerCamelCase : Dict = F"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): _lowerCamelCase : Dict = "user" if is_user else "bot" output += F"""{name} >> {text} \n""" return output @add_end_docstrings( _a , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , ) class A_ ( _a ): def __init__( self: Dict ,*__lowerCAmelCase: Dict ,**__lowerCAmelCase: Any ): '''simple docstring''' super().__init__(*__lowerCAmelCase ,**__lowerCAmelCase ) if self.tokenizer.pad_token_id is None: _lowerCamelCase : Tuple = self.tokenizer.eos_token def _lowercase ( self: Optional[int] ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Tuple=None ,**__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Optional[Any] = {} _lowerCamelCase : Any = {} _lowerCamelCase : Tuple = {} if min_length_for_response is not None: 
_lowerCamelCase : Union[str, Any] = min_length_for_response if minimum_tokens is not None: _lowerCamelCase : Dict = minimum_tokens if "max_length" in generate_kwargs: _lowerCamelCase : Optional[Any] = generate_kwargs["max_length"] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: _lowerCamelCase : Union[str, Any] = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(__lowerCAmelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self: Union[str, Any] ,__lowerCAmelCase: Union[Conversation, List[Conversation]] ,__lowerCAmelCase: Optional[int]=0 ,**__lowerCAmelCase: Any ): '''simple docstring''' _lowerCamelCase : int = super().__call__(__lowerCAmelCase ,num_workers=__lowerCAmelCase ,**__lowerCAmelCase ) if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) == 1: return outputs[0] return outputs def _lowercase ( self: Any ,__lowerCAmelCase: Conversation ,__lowerCAmelCase: List[str]=32 ): '''simple docstring''' if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ): raise ValueError("ConversationalPipeline expects a Conversation as input" ) if conversation.new_user_input is None: raise ValueError( F"""Conversation with UUID {conversation.uuid} does not contain new user input to process. """ "Add user inputs with the conversation's `add_user_input` method" ) if hasattr(self.tokenizer ,"_build_conversation_input_ids" ): _lowerCamelCase : Tuple = self.tokenizer._build_conversation_input_ids(__lowerCAmelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version _lowerCamelCase : Optional[int] = self._legacy_parse_and_tokenize(__lowerCAmelCase ) if self.framework == "pt": _lowerCamelCase : int = torch.LongTensor([input_ids] ) elif self.framework == "tf": _lowerCamelCase : Tuple = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=10 ,**__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = generate_kwargs.get("max_length" ,self.model.config.max_length ) _lowerCamelCase : Any = model_inputs["input_ids"].shape[1] if max_length - minimum_tokens < n: logger.warning(F"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) _lowerCamelCase : Union[str, Any] = max_length - minimum_tokens _lowerCamelCase : Optional[Any] = model_inputs["input_ids"][:, -trim:] if "attention_mask" in model_inputs: _lowerCamelCase : Dict = model_inputs["attention_mask"][:, -trim:] _lowerCamelCase : str = model_inputs.pop("conversation" ) _lowerCamelCase : Tuple = max_length _lowerCamelCase : Union[str, Any] = self.model.generate(**__lowerCAmelCase ,**__lowerCAmelCase ) if self.model.config.is_encoder_decoder: _lowerCamelCase : Dict = 1 else: _lowerCamelCase : Any = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: str=True ): '''simple docstring''' _lowerCamelCase : Tuple = model_outputs["output_ids"] _lowerCamelCase : Optional[Any] = self.tokenizer.decode( output_ids[0] ,skip_special_tokens=__lowerCAmelCase ,clean_up_tokenization_spaces=__lowerCAmelCase ,) _lowerCamelCase : List[str] = model_outputs["conversation"] conversation.mark_processed() conversation.append_response(__lowerCAmelCase ) return conversation def _lowercase ( self:
List[Any] ,__lowerCAmelCase: Conversation ): '''simple docstring''' _lowerCamelCase : Tuple = self.tokenizer.eos_token_id _lowerCamelCase : int = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase ) ) if len(__lowerCAmelCase ) > self.tokenizer.model_max_length: _lowerCamelCase : List[Any] = input_ids[-self.tokenizer.model_max_length :] return input_ids
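# --- Editor's note: a hedged usage sketch (not part of the original module).
# The pipeline is driven through Conversation objects; the checkpoint name is
# illustrative only, and any conversational checkpoint on the Hub should work
# with the transformers version this module targets.
if __name__ == "__main__":
    from transformers import Conversation, pipeline

    chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
    conversation = Conversation("Hi, how are you today?")
    conversation = chatbot(conversation)
    print(conversation.generated_responses[-1])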
340
"""simple docstring""" import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model _lowerCAmelCase : str = '''0.12''' # assumed parallelism: 8 if is_torch_available(): import torch def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ) -> List[Any]: '''simple docstring''' if rng is None: _lowerCamelCase : Union[str, Any] = random.Random() _lowerCamelCase : Union[str, Any] = 1 for dim in shape: total_dims *= dim _lowerCamelCase : Optional[int] = [] for _ in range(_lowerCamelCase ): values.append(rng.randint(0 , vocab_size - 1 ) ) _lowerCamelCase : Union[str, Any] = np.array(_lowerCamelCase , dtype=jnp.intaa ).reshape(_lowerCamelCase ) return output def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : Optional[int] = ids_tensor(_lowerCamelCase , vocab_size=2 , rng=_lowerCamelCase ) # make sure that at least one token is attended to for each batch _lowerCamelCase : List[str] = 1 return attn_mask @require_flax class A_ : lowerCAmelCase__ = None lowerCAmelCase__ = () def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 _lowerCamelCase : List[str] = 2 _lowerCamelCase : str = inputs["input_ids"].shape[-1] // 2 _lowerCamelCase : Tuple = inputs["input_ids"][:max_batch_size, :sequence_length] _lowerCamelCase : Any = jnp.ones_like(__lowerCAmelCase ) _lowerCamelCase : List[Any] = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens _lowerCamelCase : Optional[Any] = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` _lowerCamelCase : List[str] = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = False _lowerCamelCase : Dict = max_length _lowerCamelCase : Tuple = 0 for model_class in self.all_generative_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning _lowerCamelCase : Any = getattr(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Dict = pt_model_class(__lowerCAmelCase ).eval() _lowerCamelCase : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCAmelCase ,flax_model.params ) _lowerCamelCase : int = flax_model.generate(__lowerCAmelCase ).sequences _lowerCamelCase : Optional[int] = pt_model.generate(torch.tensor(__lowerCAmelCase ,dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: _lowerCamelCase : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, 
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = self._get_input_ids_and_config() _lowerCamelCase : Union[str, Any] = False _lowerCamelCase : Union[str, Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = True _lowerCamelCase : Optional[int] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : int = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : int = False _lowerCamelCase : Optional[Any] = max_length _lowerCamelCase : Dict = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[str] = model_class(__lowerCAmelCase ) _lowerCamelCase : Dict = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = self._get_input_ids_and_config() _lowerCamelCase : Tuple = False _lowerCamelCase : Union[str, Any] = max_length _lowerCamelCase : List[str] = 2 _lowerCamelCase : Optional[int] = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : str = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() _lowerCamelCase : int = True _lowerCamelCase : List[Any] = max_length _lowerCamelCase : Optional[Any] = 0.8 _lowerCamelCase : Union[str, Any] = 10 _lowerCamelCase : List[str] = 0.3 _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : str = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Any = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : int = jit(model.generate ) _lowerCamelCase : Optional[int] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() 
,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[str] = max_length _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : Dict = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Any = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : Any = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() _lowerCamelCase : Dict = max_length _lowerCamelCase : List[Any] = 2 _lowerCamelCase : Tuple = 1 _lowerCamelCase : List[str] = 8 _lowerCamelCase : List[Any] = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : int = model_class(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : Optional[Any] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Tuple = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : Dict = False _lowerCamelCase : Any = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[Any] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : List[str] = 
attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : int = 2 _lowerCamelCase : int = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : Dict = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) @require_flax class A_ ( unittest.TestCase ): def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" ) _lowerCamelCase : Union[str, Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) _lowerCamelCase : Optional[Any] = "Hello world" _lowerCamelCase : str = tokenizer(__lowerCAmelCase ,return_tensors="np" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__lowerCAmelCase ,"do_samples" ): model.generate(__lowerCAmelCase ,do_samples=__lowerCAmelCase ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__lowerCAmelCase ,"foo" ): _lowerCamelCase : List[str] = {"foo": "bar"} model.generate(__lowerCAmelCase ,**__lowerCAmelCase )
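# --- Editor's note: a hedged, self-contained illustration (not part of the
# original tests) of the invariant checked throughout this class: a
# jit-compiled function must return the same values as its eager version.
if __name__ == "__main__" and is_flax_available():
    import jax.numpy as jnp
    from jax import jit

    def add_one(x):
        return x + 1

    batch = jnp.arange(4)
    assert (add_one(batch) == jit(add_one)(batch)).all()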
340
1
"""simple docstring""" import requests from bsa import BeautifulSoup def lowerCamelCase_( _lowerCamelCase = "AAPL" ) -> str: '''simple docstring''' _lowerCamelCase : str = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}""" _lowerCamelCase : Dict = BeautifulSoup(requests.get(_lowerCamelCase ).text , "html.parser" ) _lowerCamelCase : str = "My(6px) Pos(r) smartphone_Mt(6px)" return soup.find("div" , class_=class_ ).find("span" ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
340
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : int = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class A_ ( _a ): lowerCAmelCase__ = 'mobilenet_v1' def __init__( self: Tuple ,__lowerCAmelCase: int=3 ,__lowerCAmelCase: Dict=224 ,__lowerCAmelCase: int=1.0 ,__lowerCAmelCase: Tuple=8 ,__lowerCAmelCase: List[str]="relu6" ,__lowerCAmelCase: int=True ,__lowerCAmelCase: List[Any]=0.9_99 ,__lowerCAmelCase: Optional[int]=0.02 ,__lowerCAmelCase: Optional[int]=0.0_01 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(**__lowerCAmelCase ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) _lowerCamelCase : List[str] = num_channels _lowerCamelCase : Union[str, Any] = image_size _lowerCamelCase : List[Any] = depth_multiplier _lowerCamelCase : Any = min_depth _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Dict = tf_padding _lowerCamelCase : Union[str, Any] = classifier_dropout_prob _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps class A_ ( _a ): lowerCAmelCase__ = version.parse('1.11' ) @property def _lowercase ( self: Optional[int] ): '''simple docstring''' return OrderedDict([("pixel_values", {0: "batch"})] ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def _lowercase ( self: Any ): '''simple docstring''' return 1e-4
340
1
"""simple docstring""" import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder _lowerCAmelCase : Tuple = '''base_with_context''' def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : int = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) ) _lowerCamelCase : Optional[Any] = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCamelCase ) for lyr_num, lyr in enumerate(model.encoders ): _lowerCamelCase : List[Any] = weights[F"""layers_{lyr_num}"""] _lowerCamelCase : Dict = nn.Parameter( torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) ) _lowerCamelCase : Optional[int] = ly_weight["attention"] _lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) _lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) _lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) _lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) _lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) _lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) _lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) _lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) _lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) ) return model def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : int = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) ) _lowerCamelCase : Union[str, Any] = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCamelCase ) for lyr_num, lyr in enumerate(model.encoders ): _lowerCamelCase : Tuple = weights[F"""layers_{lyr_num}"""] _lowerCamelCase : Tuple = ly_weight["attention"] _lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) _lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) _lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) _lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) _lowerCamelCase : Optional[Any] = nn.Parameter( torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) ) _lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) _lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) _lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) _lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) ) return model def lowerCamelCase_( _lowerCamelCase , 
_lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) ) _lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) ) _lowerCamelCase : Dict = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCamelCase ) _lowerCamelCase : List[str] = nn.Parameter( torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) ) for lyr_num, lyr in enumerate(model.decoders ): _lowerCamelCase : Union[str, Any] = weights[F"""layers_{lyr_num}"""] _lowerCamelCase : Dict = nn.Parameter( torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) ) _lowerCamelCase : Dict = nn.Parameter( torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) ) _lowerCamelCase : Union[str, Any] = ly_weight["self_attention"] _lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) _lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) _lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) _lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) _lowerCamelCase : Optional[int] = ly_weight["MultiHeadDotProductAttention_0"] _lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) _lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) _lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) _lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) _lowerCamelCase : str = nn.Parameter( torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) ) _lowerCamelCase : str = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) _lowerCamelCase : int = nn.Parameter( torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) ) _lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) _lowerCamelCase : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) _lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) _lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) ) _lowerCamelCase : str = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) ) return model def lowerCamelCase_( _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : Optional[Any] = checkpoints.load_tax_checkpoint(args.checkpoint_path ) _lowerCamelCase : Dict = jnp.tree_util.tree_map(onp.array , _lowerCamelCase ) _lowerCamelCase : int = [ "from __gin__ import dynamic_registration", "from music_spectrogram_diffusion.models.diffusion import diffusion_utils", "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0", "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()", ] _lowerCamelCase : Tuple = os.path.join(args.checkpoint_path , ".." 
, "config.gin" ) _lowerCamelCase : Any = inference.parse_training_gin_file(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : int = inference.InferenceModel(args.checkpoint_path , _lowerCamelCase ) _lowerCamelCase : str = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" ) _lowerCamelCase : Dict = SpectrogramNotesEncoder( max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , ) _lowerCamelCase : Dict = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , ) _lowerCamelCase : Optional[int] = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) _lowerCamelCase : Union[str, Any] = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , _lowerCamelCase ) _lowerCamelCase : Dict = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , _lowerCamelCase ) _lowerCamelCase : List[Any] = load_decoder(ta_checkpoint["target"]["decoder"] , _lowerCamelCase ) _lowerCamelCase : List[Any] = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" ) _lowerCamelCase : Dict = SpectrogramDiffusionPipeline( notes_encoder=_lowerCamelCase , continuous_encoder=_lowerCamelCase , decoder=_lowerCamelCase , scheduler=_lowerCamelCase , melgan=_lowerCamelCase , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''') parser.add_argument( '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.''' ) parser.add_argument( '''--checkpoint_path''', default=f'''{MODEL}/checkpoint_500000''', type=str, required=False, help='''Path to the original jax model checkpoint.''', ) _lowerCAmelCase : Dict = parser.parse_args() main(args)
340
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def lowerCamelCase_( _lowerCamelCase ) -> Any: '''simple docstring''' for param in module.parameters(): _lowerCamelCase : Optional[int] = False def lowerCamelCase_( ) -> List[str]: '''simple docstring''' _lowerCamelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _lowerCamelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Dict = plt.imshow(_lowerCamelCase ) fig.axes.get_xaxis().set_visible(_lowerCamelCase ) fig.axes.get_yaxis().set_visible(_lowerCamelCase ) plt.show() def lowerCamelCase_( ) -> str: '''simple docstring''' _lowerCamelCase : Tuple = datetime.now() _lowerCamelCase : Tuple = current_time.strftime("%H:%M:%S" ) return timestamp
340
1
"""simple docstring""" import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ ( _a , unittest.TestCase ): lowerCAmelCase__ = FunnelTokenizer lowerCAmelCase__ = FunnelTokenizerFast lowerCAmelCase__ = True lowerCAmelCase__ = True def _lowercase ( self: Any ): '''simple docstring''' super().setUp() _lowerCamelCase : Optional[int] = [ "<unk>", "<cls>", "<sep>", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] _lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def _lowercase ( self: str ,**__lowerCAmelCase: List[Any] ): '''simple docstring''' return FunnelTokenizer.from_pretrained(self.tmpdirname ,**__lowerCAmelCase ) def _lowercase ( self: int ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' return FunnelTokenizerFast.from_pretrained(self.tmpdirname ,**__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = "UNwant\u00E9d,running" _lowerCamelCase : Tuple = "unwanted, running" return input_text, output_text def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : Optional[Any] = self.tokenizer_class(self.vocab_file ) _lowerCamelCase : Any = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(__lowerCAmelCase ,["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,[7, 4, 5, 10, 8, 9] ) def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=__lowerCAmelCase ) for tokenizer in tokenizers: _lowerCamelCase : Union[str, Any] = tokenizer("UNwant\u00E9d,running" ) _lowerCamelCase : str = len(inputs["input_ids"] ) - 1 self.assertListEqual(inputs["token_type_ids"] ,[2] + [0] * sentence_len ) _lowerCamelCase : Tuple = tokenizer("UNwant\u00E9d,running" ,"UNwant\u00E9d,running" ) self.assertListEqual(inputs["token_type_ids"] ,[2] + [0] * sentence_len + [1] * sentence_len )
340
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = ( first_str_length if first_str_length > second_str_length else second_str_length ) _lowerCamelCase : list = [] for char_count in range(_lowerCamelCase ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(_lowerCamelCase ) if __name__ == "__main__": print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
340
1
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Dict = len(_lowerCamelCase ) _lowerCamelCase : List[str] = len(matrix[0] ) _lowerCamelCase : Optional[Any] = min(_lowerCamelCase , _lowerCamelCase ) for row in range(_lowerCamelCase ): # Check if diagonal element is not zero if matrix[row][row] != 0: # Eliminate all the elements below the diagonal for col in range(row + 1 , _lowerCamelCase ): _lowerCamelCase : Dict = matrix[col][row] / matrix[row][row] for i in range(_lowerCamelCase , _lowerCamelCase ): matrix[col][i] -= multiplier * matrix[row][i] else: # Find a non-zero diagonal element to swap rows _lowerCamelCase : List[Any] = True for i in range(row + 1 , _lowerCamelCase ): if matrix[i][row] != 0: _lowerCamelCase, _lowerCamelCase : List[str] = matrix[i], matrix[row] _lowerCamelCase : Optional[Any] = False break if reduce: rank -= 1 for i in range(_lowerCamelCase ): _lowerCamelCase : Any = matrix[i][rank] # Reduce the row pointer by one to stay on the same row row -= 1 return rank if __name__ == "__main__": import doctest doctest.testmod()
340
"""simple docstring""" _lowerCAmelCase : Tuple = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Any = [False] * len(_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = [s] _lowerCamelCase : str = True while queue: _lowerCamelCase : Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_lowerCamelCase ) _lowerCamelCase : Any = True _lowerCamelCase : Any = u return visited[t] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : List[str] = [-1] * (len(_lowerCamelCase )) _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : List[str] = [i[:] for i in graph] # Record original cut, copy. while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Any = float("Inf" ) _lowerCamelCase : Dict = sink while s != source: # Find the minimum value in select path _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , graph[parent[s]][s] ) _lowerCamelCase : Union[str, Any] = parent[s] max_flow += path_flow _lowerCamelCase : Optional[Any] = sink while v != source: _lowerCamelCase : Union[str, Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _lowerCamelCase : List[str] = parent[v] for i in range(len(_lowerCamelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
340
1
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase ) -> bool: '''simple docstring''' if not isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Optional[int] = F"""Input value of [number={number}] must be an integer""" raise TypeError(_lowerCamelCase ) if number < 0: return False _lowerCamelCase : str = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
340
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[str] = { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''', '''umberto-commoncrawl-cased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json''' ), '''umberto-wikipedia-uncased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json''' ), } class A_ ( _a ): lowerCAmelCase__ = 'camembert' def __init__( self: Tuple ,__lowerCAmelCase: Union[str, Any]=30_522 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Union[str, Any]=12 ,__lowerCAmelCase: int=12 ,__lowerCAmelCase: Optional[int]=3_072 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: int=512 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Any=1 ,__lowerCAmelCase: Any=0 ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Any="absolute" ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Tuple=None ,**__lowerCAmelCase: Dict ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[str] = vocab_size _lowerCamelCase : Any = hidden_size _lowerCamelCase : Union[str, Any] = num_hidden_layers _lowerCamelCase : str = num_attention_heads _lowerCamelCase : List[Any] = hidden_act _lowerCamelCase : int = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = max_position_embeddings _lowerCamelCase : str = type_vocab_size _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Union[str, Any] = layer_norm_eps _lowerCamelCase : Tuple = position_embedding_type _lowerCamelCase : List[Any] = use_cache _lowerCamelCase : Dict = classifier_dropout class A_ ( _a ): @property def _lowercase ( self: Any ): '''simple docstring''' if self.task == "multiple-choice": _lowerCamelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"} else: _lowerCamelCase : int = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
340
1
"""simple docstring""" import numpy # List of input, output pairs _lowerCAmelCase : Union[str, Any] = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) _lowerCAmelCase : str = (((515, 22, 13), 555), ((61, 35, 49), 150)) _lowerCAmelCase : int = [2, 4, 1, 5] _lowerCAmelCase : Dict = len(train_data) _lowerCAmelCase : int = 0.009 def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase="train" ) -> int: '''simple docstring''' return calculate_hypothesis_value(_lowerCamelCase , _lowerCamelCase ) - output( _lowerCamelCase , _lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Union[str, Any] = 0 for i in range(len(_lowerCamelCase ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: '''simple docstring''' if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=m ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Tuple = 0 for i in range(_lowerCamelCase ): if index == -1: summation_value += _error(_lowerCamelCase ) else: summation_value += _error(_lowerCamelCase ) * train_data[i][0][index] return summation_value def lowerCamelCase_( _lowerCamelCase ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : int = summation_of_cost_derivative(_lowerCamelCase , _lowerCamelCase ) / m return cost_derivative_value def lowerCamelCase_( ) -> str: '''simple docstring''' global parameter_vector # Tune these values to set a tolerance value for predicted output _lowerCamelCase : str = 0.0_0_0_0_0_2 _lowerCamelCase : List[str] = 0 _lowerCamelCase : Optional[Any] = 0 while True: j += 1 _lowerCamelCase : int = [0, 0, 0, 0] for i in range(0 , len(_lowerCamelCase ) ): _lowerCamelCase : Any = get_cost_derivative(i - 1 ) _lowerCamelCase : Union[str, Any] = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( _lowerCamelCase , _lowerCamelCase , atol=_lowerCamelCase , rtol=_lowerCamelCase , ): break _lowerCamelCase : List[str] = temp_parameter_vector print(("Number of iterations:", j) ) def lowerCamelCase_( ) -> int: '''simple docstring''' for i in range(len(_lowerCamelCase ) ): print(("Actual output value:", output(_lowerCamelCase , "test" )) ) print(("Hypothesis output:", calculate_hypothesis_value(_lowerCamelCase , "test" )) ) if __name__ == "__main__": run_gradient_descent() print('''\nTesting gradient descent for a linear hypothesis function.\n''') test_gradient_descent()
340
"""simple docstring""" from collections import defaultdict def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : str = True for v in tree[start]: if v not in visited: ret += dfs(_lowerCamelCase ) if ret % 2 == 0: cuts.append(_lowerCamelCase ) return ret def lowerCamelCase_( ) -> int: '''simple docstring''' dfs(1 ) if __name__ == "__main__": _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = 10, 9 _lowerCAmelCase : str = defaultdict(list) _lowerCAmelCase : dict[int, bool] = {} _lowerCAmelCase : list[int] = [] _lowerCAmelCase : Any = 0 _lowerCAmelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
340
1
"""simple docstring""" from math import pi, sqrt, tan def lowerCamelCase_( _lowerCamelCase ) -> float: '''simple docstring''' if side_length < 0: raise ValueError("surface_area_cube() only accepts non-negative values" ) return 6 * side_length**2 def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' if length < 0 or breadth < 0 or height < 0: raise ValueError("surface_area_cuboid() only accepts non-negative values" ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def lowerCamelCase_( _lowerCamelCase ) -> float: '''simple docstring''' if radius < 0: raise ValueError("surface_area_sphere() only accepts non-negative values" ) return 4 * pi * radius**2 def lowerCamelCase_( _lowerCamelCase ) -> float: '''simple docstring''' if radius < 0: raise ValueError("surface_area_hemisphere() only accepts non-negative values" ) return 3 * pi * radius**2 def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' if radius < 0 or height < 0: raise ValueError("surface_area_cone() only accepts non-negative values" ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( "surface_area_conical_frustum() only accepts non-negative values" ) _lowerCamelCase : Optional[int] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' if radius < 0 or height < 0: raise ValueError("surface_area_cylinder() only accepts non-negative values" ) return 2 * pi * radius * (height + radius) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' if torus_radius < 0 or tube_radius < 0: raise ValueError("surface_area_torus() only accepts non-negative values" ) if torus_radius < tube_radius: raise ValueError( "surface_area_torus() does not support spindle or self intersecting tori" ) return 4 * pow(_lowerCamelCase , 2 ) * torus_radius * tube_radius def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' if length < 0 or width < 0: raise ValueError("area_rectangle() only accepts non-negative values" ) return length * width def lowerCamelCase_( _lowerCamelCase ) -> float: '''simple docstring''' if side_length < 0: raise ValueError("area_square() only accepts non-negative values" ) return side_length**2 def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' if base < 0 or height < 0: raise ValueError("area_triangle() only accepts non-negative values" ) return (base * height) / 2 def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError("area_triangle_three_sides() only accepts non-negative values" ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError("Given three sides do not form a triangle" ) _lowerCamelCase : str = (sidea + sidea + sidea) / 2 _lowerCamelCase : List[Any] = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' if base < 0 or height < 0: raise 
ValueError("area_parallelogram() only accepts non-negative values" ) return base * height def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' if basea < 0 or basea < 0 or height < 0: raise ValueError("area_trapezium() only accepts non-negative values" ) return 1 / 2 * (basea + basea) * height def lowerCamelCase_( _lowerCamelCase ) -> float: '''simple docstring''' if radius < 0: raise ValueError("area_circle() only accepts non-negative values" ) return pi * radius**2 def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' if radius_x < 0 or radius_y < 0: raise ValueError("area_ellipse() only accepts non-negative values" ) return pi * radius_x * radius_y def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' if diagonal_a < 0 or diagonal_a < 0: raise ValueError("area_rhombus() only accepts non-negative values" ) return 1 / 2 * diagonal_a * diagonal_a def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' if not isinstance(_lowerCamelCase , _lowerCamelCase ) or sides < 3: raise ValueError( "area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides" ) elif length < 0: raise ValueError( "area_reg_polygon() only accepts non-negative values as \ length of a side" ) return (sides * length**2) / (4 * tan(pi / sides )) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print('''[DEMO] Areas of various geometric shapes: \n''') print(f'''Rectangle: {area_rectangle(10, 20) = }''') print(f'''Square: {area_square(10) = }''') print(f'''Triangle: {area_triangle(10, 10) = }''') print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''') print(f'''Parallelogram: {area_parallelogram(10, 20) = }''') print(f'''Rhombus: {area_rhombus(10, 20) = }''') print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''') print(f'''Circle: {area_circle(20) = }''') print(f'''Ellipse: {area_ellipse(10, 20) = }''') print('''\nSurface Areas of various geometric shapes: \n''') print(f'''Cube: {surface_area_cube(20) = }''') print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''') print(f'''Sphere: {surface_area_sphere(20) = }''') print(f'''Hemisphere: {surface_area_hemisphere(20) = }''') print(f'''Cone: {surface_area_cone(10, 20) = }''') print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''') print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''') print(f'''Torus: {surface_area_torus(20, 10) = }''') print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''') print(f'''Square: {area_reg_polygon(4, 10) = }''') print(f'''Reqular Pentagon: {area_reg_polygon(5, 10) = }''')
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder _lowerCAmelCase : Optional[int] = '''__DUMMY_TRANSFORMERS_USER__''' _lowerCAmelCase : Dict = '''Dummy User''' _lowerCAmelCase : Optional[int] = '''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''' _lowerCAmelCase : Tuple = '''https://hub-ci.huggingface.co''' _lowerCAmelCase : Any = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}''' _lowerCAmelCase : Tuple = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}''' _lowerCAmelCase : Dict = Path('''~/.huggingface/hub_ci_token''').expanduser() @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' monkeypatch.setattr("datasets.config.HF_ENDPOINT" , _lowerCamelCase ) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' HfFolder.save_token(_lowerCamelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="session" ) def lowerCamelCase_( ) -> str: '''simple docstring''' return HfApi(endpoint=_lowerCamelCase ) @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Dict = HfFolder.get_token() HfFolder.save_token(_lowerCamelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' def _cleanup_repo(_lowerCamelCase ): hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) return _cleanup_repo @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> List[str]: '''simple docstring''' @contextmanager def _temporary_repo(_lowerCamelCase ): try: yield repo_id finally: cleanup_repo(_lowerCamelCase ) return _temporary_repo @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Tuple = F"""repo_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[str] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data/text_data.txt" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : Dict = 
F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Any = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[Any] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
"""simple docstring""" from collections import defaultdict from math import gcd def lowerCamelCase_( _lowerCamelCase = 1500000 ) -> int: '''simple docstring''' _lowerCamelCase : defaultdict = defaultdict(_lowerCamelCase ) _lowerCamelCase : List[Any] = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , _lowerCamelCase , 2 ): if gcd(_lowerCamelCase , _lowerCamelCase ) > 1: continue _lowerCamelCase : Tuple = 2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(_lowerCamelCase , limit + 1 , _lowerCamelCase ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(f'''{solution() = }''')
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) class A_ ( _a ): def __init__( self: List[Any] ,__lowerCAmelCase: Union[List[ControlNetModel], Tuple[ControlNetModel]] ): '''simple docstring''' super().__init__() _lowerCamelCase : Tuple = nn.ModuleList(__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Union[torch.Tensor, float, int] ,__lowerCAmelCase: torch.Tensor ,__lowerCAmelCase: List[torch.tensor] ,__lowerCAmelCase: List[float] ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[Dict[str, Any]] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(__lowerCAmelCase ,__lowerCAmelCase ,self.nets ) ): _lowerCamelCase, _lowerCamelCase : Union[str, Any] = controlnet( __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,) # merge samples if i == 0: _lowerCamelCase, _lowerCamelCase : Optional[Any] = down_samples, mid_sample else: _lowerCamelCase : Optional[int] = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__lowerCAmelCase ,__lowerCAmelCase ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Union[str, os.PathLike] ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Callable = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[str] = None ,): '''simple docstring''' _lowerCamelCase : List[Any] = 0 _lowerCamelCase : str = save_directory for controlnet in self.nets: controlnet.save_pretrained( __lowerCAmelCase ,is_main_process=__lowerCAmelCase ,save_function=__lowerCAmelCase ,safe_serialization=__lowerCAmelCase ,variant=__lowerCAmelCase ,) idx += 1 _lowerCamelCase : int = model_path_to_save + F"""_{idx}""" @classmethod def _lowercase ( cls: Any ,__lowerCAmelCase: Optional[Union[str, os.PathLike]] ,**__lowerCAmelCase: int ): '''simple docstring''' _lowerCamelCase : int = 0 _lowerCamelCase : str = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... _lowerCamelCase : Dict = pretrained_model_path while os.path.isdir(__lowerCAmelCase ): _lowerCamelCase : List[Any] = ControlNetModel.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase ) controlnets.append(__lowerCAmelCase ) idx += 1 _lowerCamelCase : Tuple = pretrained_model_path + F"""_{idx}""" logger.info(F"""{len(__lowerCAmelCase )} controlnets loaded from {pretrained_model_path}.""" ) if len(__lowerCAmelCase ) == 0: raise ValueError( F"""No ControlNets found under {os.path.dirname(__lowerCAmelCase )}. Expected at least {pretrained_model_path + '_0'}.""" ) return cls(__lowerCAmelCase )
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(a - b ) for a, b in zip(_lowerCamelCase , _lowerCamelCase ) ) ) def lowerCamelCase_( _lowerCamelCase ) -> None: '''simple docstring''' if point: if isinstance(_lowerCamelCase , _lowerCamelCase ): for item in point: if not isinstance(_lowerCamelCase , (int, float) ): _lowerCamelCase : Dict = ( "Expected a list of numbers as input, found " F"""{type(_lowerCamelCase ).__name__}""" ) raise TypeError(_lowerCamelCase ) else: _lowerCamelCase : Optional[int] = F"""Expected a list of numbers as input, found {type(_lowerCamelCase ).__name__}""" raise TypeError(_lowerCamelCase ) else: raise ValueError("Missing an input" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(x - y ) for x, y in zip(_lowerCamelCase , _lowerCamelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : int = logging.get_logger(__name__) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]: '''simple docstring''' _lowerCamelCase : Tuple = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("head" ): _lowerCamelCase : Tuple = "segformer.encoder." + key if key.startswith("backbone" ): _lowerCamelCase : Any = key.replace("backbone" , "segformer.encoder" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _lowerCamelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )] _lowerCamelCase : int = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_lowerCamelCase )-1}""" ) if "norm" in key: _lowerCamelCase : Optional[Any] = key.replace("norm" , "layer_norm" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _lowerCamelCase : Dict = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )] _lowerCamelCase : Tuple = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_lowerCamelCase )-1}""" ) if "layer_norm1" in key: _lowerCamelCase : Union[str, Any] = key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: _lowerCamelCase : int = key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 _lowerCamelCase : Union[str, Any] = key[key.find("block" ) + len("block" )] _lowerCamelCase : Optional[Any] = key.replace(F"""block{idx}""" , F"""block.{int(_lowerCamelCase )-1}""" ) if "attn.q" in key: _lowerCamelCase : Optional[int] = key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: _lowerCamelCase : List[str] = key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: _lowerCamelCase : Tuple = key.replace("attn" , "attention.self" ) if "fc1" in key: _lowerCamelCase : Optional[Any] = key.replace("fc1" , "dense1" ) if "fc2" in key: _lowerCamelCase : Dict = key.replace("fc2" , "dense2" ) if "linear_pred" in key: _lowerCamelCase : int = key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: _lowerCamelCase : str = key.replace("linear_fuse.conv" , "linear_fuse" ) _lowerCamelCase : Optional[Any] = key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _lowerCamelCase : Union[str, Any] = key[key.find("linear_c" ) + len("linear_c" )] _lowerCamelCase : Optional[int] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_lowerCamelCase )-1}""" ) if key.startswith("head" ): _lowerCamelCase : List[str] = key.replace("head" , "classifier" ) _lowerCamelCase : Union[str, Any] = value return new_state_dict def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" ) _lowerCamelCase : Optional[Any] = 
state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict _lowerCamelCase : int = kv_weight[ : config.hidden_sizes[i], : ] _lowerCamelCase : int = kv_bias[: config.hidden_sizes[i]] _lowerCamelCase : Optional[int] = kv_weight[ config.hidden_sizes[i] :, : ] _lowerCamelCase : Optional[Any] = kv_bias[ config.hidden_sizes[i] : ] def lowerCamelCase_( ) -> Dict: '''simple docstring''' _lowerCamelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCamelCase : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return image @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : Any = SegformerConfig() _lowerCamelCase : int = False # set attributes based on model_name _lowerCamelCase : Any = "huggingface/label-files" if "segformer" in model_name: _lowerCamelCase : str = model_name[len("segformer." ) : len("segformer." ) + 2] if "ade" in model_name: _lowerCamelCase : str = 150 _lowerCamelCase : Dict = "ade20k-id2label.json" _lowerCamelCase : Dict = (1, 150, 128, 128) elif "city" in model_name: _lowerCamelCase : List[str] = 19 _lowerCamelCase : Tuple = "cityscapes-id2label.json" _lowerCamelCase : Tuple = (1, 19, 128, 128) else: raise ValueError(F"""Model {model_name} not supported""" ) elif "mit" in model_name: _lowerCamelCase : List[str] = True _lowerCamelCase : Tuple = model_name[4:6] _lowerCamelCase : Tuple = 1000 _lowerCamelCase : List[Any] = "imagenet-1k-id2label.json" _lowerCamelCase : List[Any] = (1, 1000) else: raise ValueError(F"""Model {model_name} not supported""" ) # set config attributes _lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _lowerCamelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase : Optional[Any] = idalabel _lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : int = 256 elif size == "b2": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : List[Any] = 768 _lowerCamelCase : Any = [3, 4, 6, 3] elif size == "b3": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : Union[str, Any] = 768 _lowerCamelCase : Optional[Any] = [3, 4, 18, 3] elif size == "b4": _lowerCamelCase : str = [64, 128, 320, 512] _lowerCamelCase : Optional[Any] = 768 _lowerCamelCase : Dict = [3, 8, 27, 3] elif size == "b5": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : Tuple = 768 _lowerCamelCase : Tuple = [3, 6, 40, 3] else: raise ValueError(F"""Size {size} not supported""" ) # load image processor (only resize + normalize) _lowerCamelCase : Dict = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase ) # prepare image _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Dict = image_processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values logger.info(F"""Converting model {model_name}...""" ) # load original state dict if encoder_only: _lowerCamelCase : Tuple = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) ) else: _lowerCamelCase : int = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )["state_dict"] # rename keys _lowerCamelCase : str = rename_keys(_lowerCamelCase , 
encoder_only=_lowerCamelCase ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(_lowerCamelCase , _lowerCamelCase ) # create HuggingFace model and load state dict if encoder_only: _lowerCamelCase : Tuple = False _lowerCamelCase : Optional[int] = SegformerForImageClassification(_lowerCamelCase ) else: _lowerCamelCase : List[str] = SegformerForSemanticSegmentation(_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) model.eval() # forward pass _lowerCamelCase : Any = model(_lowerCamelCase ) _lowerCamelCase : Dict = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": _lowerCamelCase : str = torch.tensor( [ [[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]], [[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]], [[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]], [[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]], [[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": _lowerCamelCase : int = torch.tensor( [ [[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]], [[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]], [[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": _lowerCamelCase : Optional[Any] = torch.tensor( [ [[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]], [[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]], [[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]], [[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]], [[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]], [[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]], 
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": _lowerCamelCase : Dict = torch.tensor( [ [[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]], [[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]], [[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": _lowerCamelCase : Optional[int] = torch.tensor( [ [[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]], [[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]], [[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [ [-1.13_72e01, -1.27_87e01, -1.34_77e01], [-1.25_36e01, -1.41_94e01, -1.44_09e01], [-1.32_17e01, -1.48_88e01, -1.53_27e01], ], [ [-1.47_91e01, -1.71_22e01, -1.82_77e01], [-1.71_63e01, -1.91_92e01, -1.95_33e01], [-1.78_97e01, -1.99_91e01, -2.03_15e01], ], [ [7.67_23e-01, 4.19_21e-01, -7.78_78e-02], [4.77_72e-01, 9.55_57e-03, -2.80_82e-01], [3.60_32e-01, -2.48_26e-01, -5.11_68e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": _lowerCamelCase : Union[str, Any] = torch.tensor( [ [[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]], [[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]], [[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": _lowerCamelCase : List[Any] = torch.tensor( [ [[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]], [[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]], [[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]], [[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]], [[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": _lowerCamelCase : Any = torch.tensor( [ [[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]], [[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]], [[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]], ] ) 
elif model_name == "segformer.b4.1024x1024.city.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]], [[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]], [[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": _lowerCamelCase : str = torch.tensor( [ [[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]], [[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]], [[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]], ] ) else: _lowerCamelCase : Dict = logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-2 ) # finally, save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) image_processor.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''segformer.b0.512x512.ade.160k''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _lowerCAmelCase : str = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring""" import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class A_ ( unittest.TestCase ): def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Dict = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split() _lowerCamelCase : List[Any] = dict(zip(__lowerCAmelCase ,range(len(__lowerCAmelCase ) ) ) ) _lowerCamelCase : Optional[int] = { "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", } _lowerCamelCase : Any = { "feature_size": 1, "padding_value": 0.0, "sampling_rate": 16_000, "return_attention_mask": False, "do_normalize": True, } _lowerCamelCase : Optional[int] = tempfile.mkdtemp() _lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] ) _lowerCamelCase : List[Any] = os.path.join(self.tmpdirname ,__lowerCAmelCase ) with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCAmelCase ) + "\n" ) with open(self.feature_extraction_file ,"w" ,encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCAmelCase ) + "\n" ) # load decoder from hub _lowerCamelCase : Optional[Any] = "hf-internal-testing/ngram-beam-search-decoder" def _lowercase ( self: str ,**__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : List[str] = self.add_kwargs_tokens_map.copy() kwargs.update(__lowerCAmelCase ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**__lowerCAmelCase ) def _lowercase ( self: int ,**__lowerCAmelCase: Union[str, Any] ): '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**__lowerCAmelCase ) def _lowercase ( self: int ,**__lowerCAmelCase: List[str] ): '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**__lowerCAmelCase ) def _lowercase ( self: List[Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : Tuple = self.get_tokenizer() _lowerCamelCase : Tuple = self.get_feature_extractor() _lowerCamelCase : Dict = self.get_decoder() _lowerCamelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase ,feature_extractor=__lowerCAmelCase ,decoder=__lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) _lowerCamelCase : List[str] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__lowerCAmelCase ) # feature extractor 
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor ,__lowerCAmelCase ) # decoder self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,) self.assertIsInstance(processor.decoder ,__lowerCAmelCase ) def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : Dict = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match _lowerCamelCase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha ,5.0 ) self.assertEqual(processor.language_model.beta ,3.0 ) self.assertEqual(processor.language_model.score_boundary ,-7.0 ) self.assertEqual(processor.language_model.unk_score_offset ,3 ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : List[Any] = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(["xx"] ) with self.assertRaisesRegex(__lowerCAmelCase ,"include" ): WavaVecaProcessorWithLM( tokenizer=__lowerCAmelCase ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() ) def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : List[str] = self.get_feature_extractor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : Dict = self.get_decoder() _lowerCamelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase ,feature_extractor=__lowerCAmelCase ,decoder=__lowerCAmelCase ) _lowerCamelCase : List[Any] = floats_list((3, 1_000) ) _lowerCamelCase : Optional[int] = feature_extractor(__lowerCAmelCase ,return_tensors="np" ) _lowerCamelCase : Optional[int] = processor(__lowerCAmelCase ,return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Any = self.get_feature_extractor() _lowerCamelCase : Dict = self.get_tokenizer() _lowerCamelCase : Tuple = self.get_decoder() _lowerCamelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase ,feature_extractor=__lowerCAmelCase ,decoder=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = "This is a test string" _lowerCamelCase : List[str] = processor(text=__lowerCAmelCase ) _lowerCamelCase : int = tokenizer(__lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Optional[Any]=(2, 10, 16) ,__lowerCAmelCase: Optional[Any]=77 ): '''simple docstring''' np.random.seed(__lowerCAmelCase ) return np.random.rand(*__lowerCAmelCase ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Any = self.get_feature_extractor() _lowerCamelCase : str = self.get_tokenizer() _lowerCamelCase : Optional[Any] = self.get_decoder() _lowerCamelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase ,feature_extractor=__lowerCAmelCase ,decoder=__lowerCAmelCase ) _lowerCamelCase : Tuple = self._get_dummy_logits(shape=(10, 16) ,seed=13 ) _lowerCamelCase : 
List[Any] = processor.decode(__lowerCAmelCase ) _lowerCamelCase : Any = decoder.decode_beams(__lowerCAmelCase )[0] self.assertEqual(decoded_decoder[0] ,decoded_processor.text ) self.assertEqual("</s> <s> </s>" ,decoded_processor.text ) self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score ) @parameterized.expand([[None], ["fork"], ["spawn"]] ) def _lowercase ( self: Any ,__lowerCAmelCase: int ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = self.get_feature_extractor() _lowerCamelCase : Union[str, Any] = self.get_tokenizer() _lowerCamelCase : Optional[int] = self.get_decoder() _lowerCamelCase : int = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase ,feature_extractor=__lowerCAmelCase ,decoder=__lowerCAmelCase ) _lowerCamelCase : List[str] = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: _lowerCamelCase : Tuple = processor.batch_decode(__lowerCAmelCase ) else: with get_context(__lowerCAmelCase ).Pool() as pool: _lowerCamelCase : str = processor.batch_decode(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Dict = list(__lowerCAmelCase ) with get_context("fork" ).Pool() as p: _lowerCamelCase : Any = decoder.decode_beams_batch(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(__lowerCAmelCase ,decoded_processor.text ) self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] ,decoded_processor.text ) self.assertListEqual(__lowerCAmelCase ,decoded_processor.logit_score ) self.assertListEqual(__lowerCAmelCase ,decoded_processor.lm_score ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = self.get_feature_extractor() _lowerCamelCase : Any = self.get_tokenizer() _lowerCamelCase : Tuple = self.get_decoder() _lowerCamelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase ,feature_extractor=__lowerCAmelCase ,decoder=__lowerCAmelCase ) _lowerCamelCase : Tuple = self._get_dummy_logits() _lowerCamelCase : str = 15 _lowerCamelCase : Optional[Any] = -20.0 _lowerCamelCase : Optional[Any] = -4.0 _lowerCamelCase : Optional[Any] = processor.batch_decode( __lowerCAmelCase ,beam_width=__lowerCAmelCase ,beam_prune_logp=__lowerCAmelCase ,token_min_logp=__lowerCAmelCase ,) _lowerCamelCase : List[Any] = decoded_processor_out.text _lowerCamelCase : Any = list(__lowerCAmelCase ) with get_context("fork" ).Pool() as pool: _lowerCamelCase : Optional[int] = decoder.decode_beams_batch( __lowerCAmelCase ,__lowerCAmelCase ,beam_width=__lowerCAmelCase ,beam_prune_logp=__lowerCAmelCase ,token_min_logp=__lowerCAmelCase ,) _lowerCamelCase : List[Any] = [d[0][0] for d in decoded_decoder_out] _lowerCamelCase : List[str] = [d[0][2] for d in decoded_decoder_out] _lowerCamelCase : Dict = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase ) self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] ,__lowerCAmelCase ) self.assertTrue(np.array_equal(__lowerCAmelCase ,decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.0_54, -18.4_47] ,__lowerCAmelCase ,atol=1e-3 ) ) 
self.assertTrue(np.array_equal(__lowerCAmelCase ,decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.5_54, -13.94_74] ,__lowerCAmelCase ,atol=1e-3 ) ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Any = self.get_feature_extractor() _lowerCamelCase : Dict = self.get_tokenizer() _lowerCamelCase : str = self.get_decoder() _lowerCamelCase : Tuple = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase ,feature_extractor=__lowerCAmelCase ,decoder=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = self._get_dummy_logits() _lowerCamelCase : Optional[int] = 2.0 _lowerCamelCase : Any = 5.0 _lowerCamelCase : Dict = -20.0 _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[int] = processor.batch_decode( __lowerCAmelCase ,alpha=__lowerCAmelCase ,beta=__lowerCAmelCase ,unk_score_offset=__lowerCAmelCase ,lm_score_boundary=__lowerCAmelCase ,) _lowerCamelCase : str = decoded_processor_out.text _lowerCamelCase : Any = list(__lowerCAmelCase ) decoder.reset_params( alpha=__lowerCAmelCase ,beta=__lowerCAmelCase ,unk_score_offset=__lowerCAmelCase ,lm_score_boundary=__lowerCAmelCase ,) with get_context("fork" ).Pool() as pool: _lowerCamelCase : str = decoder.decode_beams_batch( __lowerCAmelCase ,__lowerCAmelCase ,) _lowerCamelCase : Optional[Any] = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase ) self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] ,__lowerCAmelCase ) _lowerCamelCase : Any = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha ,2.0 ) self.assertEqual(lm_model.beta ,5.0 ) self.assertEqual(lm_model.unk_score_offset ,-20.0 ) self.assertEqual(lm_model.score_boundary ,__lowerCAmelCase ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Any = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) _lowerCamelCase : List[str] = processor.decoder.model_container[processor.decoder._model_key] _lowerCamelCase : str = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute() _lowerCamelCase : Optional[int] = os.listdir(__lowerCAmelCase ) _lowerCamelCase : List[Any] = ["alphabet.json", "language_model"] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) 
self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : Any = snapshot_download("hf-internal-testing/processor_with_lm" ) _lowerCamelCase : str = WavaVecaProcessorWithLM.from_pretrained(__lowerCAmelCase ) _lowerCamelCase : int = processor.decoder.model_container[processor.decoder._model_key] _lowerCamelCase : List[Any] = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute() _lowerCamelCase : str = os.listdir(__lowerCAmelCase ) _lowerCamelCase : Dict = os.listdir(__lowerCAmelCase ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : List[Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) _lowerCamelCase : int = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" ) _lowerCamelCase : int = floats_list((3, 1_000) ) _lowerCamelCase : List[str] = processor_wavaveca(__lowerCAmelCase ,return_tensors="np" ) _lowerCamelCase : Tuple = processor_auto(__lowerCAmelCase ,return_tensors="np" ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1e-2 ) _lowerCamelCase : List[str] = self._get_dummy_logits() _lowerCamelCase : Tuple = processor_wavaveca.batch_decode(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = processor_auto.batch_decode(__lowerCAmelCase ) self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : str = self.get_feature_extractor() _lowerCamelCase : Optional[int] = self.get_tokenizer() _lowerCamelCase : str = self.get_decoder() _lowerCamelCase : Tuple = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase ,feature_extractor=__lowerCAmelCase ,decoder=__lowerCAmelCase ) self.assertListEqual( processor.model_input_names ,feature_extractor.model_input_names ,msg="`processor` and `feature_extractor` model input names do not match" ,) @staticmethod def _lowercase ( __lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int ): '''simple docstring''' _lowerCamelCase : Tuple = [d[key] for d in offsets] return retrieved_list def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : str = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) _lowerCamelCase : List[Any] = self._get_dummy_logits()[0] _lowerCamelCase : Any = processor.decode(__lowerCAmelCase ,output_word_offsets=__lowerCAmelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) ,4 ) self.assertTrue("text" in outputs ) self.assertTrue("word_offsets" in outputs ) self.assertTrue(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) ) self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] ,"word" ) ) ,outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] ,"word" ) ,["<s>", "<s>", "</s>"] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] ,"start_offset" ) ,[0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] ,"end_offset" ) ,[1, 3, 5] ) def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase : Tuple = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) _lowerCamelCase : int = self._get_dummy_logits() _lowerCamelCase : 
Union[str, Any] = processor.batch_decode(__lowerCAmelCase ,output_word_offsets=__lowerCAmelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) ,4 ) self.assertTrue("text" in outputs ) self.assertTrue("word_offsets" in outputs ) self.assertTrue(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) ) self.assertListEqual( [" ".join(self.get_from_offsets(__lowerCAmelCase ,"word" ) ) for o in outputs["word_offsets"]] ,outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] ,"word" ) ,["<s>", "<s>", "</s>"] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] ,"start_offset" ) ,[0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] ,"end_offset" ) ,[1, 3, 5] ) @slow @require_torch @require_torchaudio def _lowercase ( self: Tuple ): '''simple docstring''' import torch _lowerCamelCase : str = load_dataset("common_voice" ,"en" ,split="train" ,streaming=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = ds.cast_column("audio" ,datasets.Audio(sampling_rate=16_000 ) ) _lowerCamelCase : Optional[int] = iter(__lowerCAmelCase ) _lowerCamelCase : List[str] = next(__lowerCAmelCase ) _lowerCamelCase : int = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" ) _lowerCamelCase : Union[str, Any] = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train _lowerCamelCase : Any = processor(sample["audio"]["array"] ,return_tensors="pt" ).input_values with torch.no_grad(): _lowerCamelCase : List[str] = model(__lowerCAmelCase ).logits.cpu().numpy() _lowerCamelCase : int = processor.decode(logits[0] ,output_word_offsets=__lowerCAmelCase ) _lowerCamelCase : Any = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate _lowerCamelCase : str = [ { "start_time": d["start_offset"] * time_offset, "end_time": d["end_offset"] * time_offset, "word": d["word"], } for d in output["word_offsets"] ] _lowerCamelCase : int = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL" # output words self.assertEqual(" ".join(self.get_from_offsets(__lowerCAmelCase ,"word" ) ) ,__lowerCAmelCase ) self.assertEqual(" ".join(self.get_from_offsets(__lowerCAmelCase ,"word" ) ) ,output.text ) # output times _lowerCamelCase : int = torch.tensor(self.get_from_offsets(__lowerCAmelCase ,"start_time" ) ) _lowerCamelCase : List[Any] = torch.tensor(self.get_from_offsets(__lowerCAmelCase ,"end_time" ) ) # fmt: off _lowerCamelCase : Dict = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] ) _lowerCamelCase : Union[str, Any] = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] ) # fmt: on self.assertTrue(torch.allclose(__lowerCAmelCase ,__lowerCAmelCase ,atol=0.01 ) ) self.assertTrue(torch.allclose(__lowerCAmelCase ,__lowerCAmelCase ,atol=0.01 ) )
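# Hypothetical invocation for the test class above (the module path is
# assumed, not given in the file): the fast decoder tests can be selected by
# keyword, e.g.
#
#   python -m pytest tests/models/wav2vec2_with_lm -k "decoder and not integration"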
"""simple docstring""" _lowerCAmelCase : dict[tuple[int, int, int], int] = {} def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on _lowerCamelCase : Optional[int] = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one _lowerCamelCase : int = _calculate(days - 1 , _lowerCamelCase , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 _lowerCamelCase : Tuple = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter _lowerCamelCase : str = _calculate(days - 1 , _lowerCamelCase , 0 ) _lowerCamelCase : List[Any] = state_late + state_absent + state_ontime _lowerCamelCase : int = prizestrings return prizestrings def lowerCamelCase_( _lowerCamelCase = 30 ) -> int: '''simple docstring''' return _calculate(_lowerCamelCase , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES _lowerCAmelCase : Any = logging.get_logger(__name__) _lowerCAmelCase : List[str] = OrderedDict( [ # Base model mapping ('''albert''', '''FlaxAlbertModel'''), ('''bart''', '''FlaxBartModel'''), ('''beit''', '''FlaxBeitModel'''), ('''bert''', '''FlaxBertModel'''), ('''big_bird''', '''FlaxBigBirdModel'''), ('''blenderbot''', '''FlaxBlenderbotModel'''), ('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''), ('''clip''', '''FlaxCLIPModel'''), ('''distilbert''', '''FlaxDistilBertModel'''), ('''electra''', '''FlaxElectraModel'''), ('''gpt-sw3''', '''FlaxGPT2Model'''), ('''gpt2''', '''FlaxGPT2Model'''), ('''gpt_neo''', '''FlaxGPTNeoModel'''), ('''gptj''', '''FlaxGPTJModel'''), ('''longt5''', '''FlaxLongT5Model'''), ('''marian''', '''FlaxMarianModel'''), ('''mbart''', '''FlaxMBartModel'''), ('''mt5''', '''FlaxMT5Model'''), ('''opt''', '''FlaxOPTModel'''), ('''pegasus''', '''FlaxPegasusModel'''), ('''regnet''', '''FlaxRegNetModel'''), ('''resnet''', '''FlaxResNetModel'''), ('''roberta''', '''FlaxRobertaModel'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''), ('''roformer''', '''FlaxRoFormerModel'''), ('''t5''', '''FlaxT5Model'''), ('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''), ('''vit''', '''FlaxViTModel'''), ('''wav2vec2''', '''FlaxWav2Vec2Model'''), ('''whisper''', '''FlaxWhisperModel'''), ('''xglm''', '''FlaxXGLMModel'''), ('''xlm-roberta''', '''FlaxXLMRobertaModel'''), ] ) _lowerCAmelCase : Optional[int] = OrderedDict( [ # Model for pre-training mapping ('''albert''', '''FlaxAlbertForPreTraining'''), ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''bert''', '''FlaxBertForPreTraining'''), ('''big_bird''', '''FlaxBigBirdForPreTraining'''), ('''electra''', '''FlaxElectraForPreTraining'''), ('''longt5''', '''FlaxLongT5ForConditionalGeneration'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''mt5''', '''FlaxMT5ForConditionalGeneration'''), ('''roberta''', '''FlaxRobertaForMaskedLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''), ('''roformer''', '''FlaxRoFormerForMaskedLM'''), ('''t5''', '''FlaxT5ForConditionalGeneration'''), ('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''), ('''whisper''', '''FlaxWhisperForConditionalGeneration'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''), ] ) _lowerCAmelCase : List[str] = OrderedDict( [ # Model for Masked LM mapping ('''albert''', '''FlaxAlbertForMaskedLM'''), ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''bert''', '''FlaxBertForMaskedLM'''), ('''big_bird''', '''FlaxBigBirdForMaskedLM'''), ('''distilbert''', '''FlaxDistilBertForMaskedLM'''), ('''electra''', '''FlaxElectraForMaskedLM'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''roberta''', '''FlaxRobertaForMaskedLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''), ('''roformer''', '''FlaxRoFormerForMaskedLM'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''), ] ) _lowerCAmelCase : int = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''), ('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''), ('''encoder-decoder''', '''FlaxEncoderDecoderModel'''), ('''longt5''', 
'''FlaxLongT5ForConditionalGeneration'''), ('''marian''', '''FlaxMarianMTModel'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''mt5''', '''FlaxMT5ForConditionalGeneration'''), ('''pegasus''', '''FlaxPegasusForConditionalGeneration'''), ('''t5''', '''FlaxT5ForConditionalGeneration'''), ] ) _lowerCAmelCase : Tuple = OrderedDict( [ # Model for Image-classsification ('''beit''', '''FlaxBeitForImageClassification'''), ('''regnet''', '''FlaxRegNetForImageClassification'''), ('''resnet''', '''FlaxResNetForImageClassification'''), ('''vit''', '''FlaxViTForImageClassification'''), ] ) _lowerCAmelCase : Any = OrderedDict( [ ('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''), ] ) _lowerCAmelCase : List[str] = OrderedDict( [ # Model for Causal LM mapping ('''bart''', '''FlaxBartForCausalLM'''), ('''bert''', '''FlaxBertForCausalLM'''), ('''big_bird''', '''FlaxBigBirdForCausalLM'''), ('''electra''', '''FlaxElectraForCausalLM'''), ('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''), ('''gpt2''', '''FlaxGPT2LMHeadModel'''), ('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''), ('''gptj''', '''FlaxGPTJForCausalLM'''), ('''opt''', '''FlaxOPTForCausalLM'''), ('''roberta''', '''FlaxRobertaForCausalLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''), ('''xglm''', '''FlaxXGLMForCausalLM'''), ('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''), ] ) _lowerCAmelCase : Optional[Any] = OrderedDict( [ # Model for Sequence Classification mapping ('''albert''', '''FlaxAlbertForSequenceClassification'''), ('''bart''', '''FlaxBartForSequenceClassification'''), ('''bert''', '''FlaxBertForSequenceClassification'''), ('''big_bird''', '''FlaxBigBirdForSequenceClassification'''), ('''distilbert''', '''FlaxDistilBertForSequenceClassification'''), ('''electra''', '''FlaxElectraForSequenceClassification'''), ('''mbart''', '''FlaxMBartForSequenceClassification'''), ('''roberta''', '''FlaxRobertaForSequenceClassification'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''), ('''roformer''', '''FlaxRoFormerForSequenceClassification'''), ('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''), ] ) _lowerCAmelCase : List[Any] = OrderedDict( [ # Model for Question Answering mapping ('''albert''', '''FlaxAlbertForQuestionAnswering'''), ('''bart''', '''FlaxBartForQuestionAnswering'''), ('''bert''', '''FlaxBertForQuestionAnswering'''), ('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''), ('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''), ('''electra''', '''FlaxElectraForQuestionAnswering'''), ('''mbart''', '''FlaxMBartForQuestionAnswering'''), ('''roberta''', '''FlaxRobertaForQuestionAnswering'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''), ('''roformer''', '''FlaxRoFormerForQuestionAnswering'''), ('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''), ] ) _lowerCAmelCase : List[Any] = OrderedDict( [ # Model for Token Classification mapping ('''albert''', '''FlaxAlbertForTokenClassification'''), ('''bert''', '''FlaxBertForTokenClassification'''), ('''big_bird''', '''FlaxBigBirdForTokenClassification'''), ('''distilbert''', '''FlaxDistilBertForTokenClassification'''), ('''electra''', '''FlaxElectraForTokenClassification'''), ('''roberta''', '''FlaxRobertaForTokenClassification'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''), ('''roformer''', '''FlaxRoFormerForTokenClassification'''), ('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''), ] ) 
_lowerCAmelCase : Optional[int] = OrderedDict( [ # Model for Multiple Choice mapping ('''albert''', '''FlaxAlbertForMultipleChoice'''), ('''bert''', '''FlaxBertForMultipleChoice'''), ('''big_bird''', '''FlaxBigBirdForMultipleChoice'''), ('''distilbert''', '''FlaxDistilBertForMultipleChoice'''), ('''electra''', '''FlaxElectraForMultipleChoice'''), ('''roberta''', '''FlaxRobertaForMultipleChoice'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''), ('''roformer''', '''FlaxRoFormerForMultipleChoice'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''), ] ) _lowerCAmelCase : List[str] = OrderedDict( [ ('''bert''', '''FlaxBertForNextSentencePrediction'''), ] ) _lowerCAmelCase : int = OrderedDict( [ ('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''), ('''whisper''', '''FlaxWhisperForConditionalGeneration'''), ] ) _lowerCAmelCase : Union[str, Any] = OrderedDict( [ ('''whisper''', '''FlaxWhisperForAudioClassification'''), ] ) _lowerCAmelCase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) _lowerCAmelCase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) _lowerCAmelCase : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) _lowerCAmelCase : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) _lowerCAmelCase : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) _lowerCAmelCase : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) _lowerCAmelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) _lowerCAmelCase : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) _lowerCAmelCase : str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) _lowerCAmelCase : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) _lowerCAmelCase : str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) _lowerCAmelCase : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) _lowerCAmelCase : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) _lowerCAmelCase : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class A_ ( _BaseAutoModelClass ): lowerCAmelCase__ = FLAX_MODEL_MAPPING _lowerCAmelCase : Optional[int] = auto_class_update(FlaxAutoModel) class A_ ( _BaseAutoModelClass ): lowerCAmelCase__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING _lowerCAmelCase : str = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''') class A_ ( _BaseAutoModelClass ): lowerCAmelCase__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING _lowerCAmelCase : Any = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''') class A_ ( _BaseAutoModelClass ): lowerCAmelCase__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING _lowerCAmelCase : Optional[Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''') class A_ ( _BaseAutoModelClass ): lowerCAmelCase__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _lowerCAmelCase : Optional[Any] = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', 
    checkpoint_for_example='''t5-base'''
)


class A_ ( _BaseAutoModelClass ):
    lowerCAmelCase__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


_lowerCAmelCase : str = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)


class A_ ( _BaseAutoModelClass ):
    lowerCAmelCase__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


_lowerCAmelCase : Tuple = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')


class A_ ( _BaseAutoModelClass ):
    lowerCAmelCase__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


_lowerCAmelCase : Optional[int] = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)


class A_ ( _BaseAutoModelClass ):
    lowerCAmelCase__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


_lowerCAmelCase : Dict = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')


class A_ ( _BaseAutoModelClass ):
    lowerCAmelCase__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


_lowerCAmelCase : Optional[int] = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)


class A_ ( _BaseAutoModelClass ):
    lowerCAmelCase__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


_lowerCAmelCase : Any = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='''image classification'''
)


class A_ ( _BaseAutoModelClass ):
    lowerCAmelCase__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


_lowerCAmelCase : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')


class A_ ( _BaseAutoModelClass ):
    lowerCAmelCase__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


_lowerCAmelCase : Dict = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
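A minimal usage sketch of the auto classes defined above, assuming the public `transformers` spelling of the renamed definitions (`FlaxAutoModelForSeq2SeqLM`) and the `t5-base` example checkpoint that `checkpoint_for_example` points at:

from transformers import AutoTokenizer, FlaxAutoModelForSeq2SeqLM

# The auto class reads the checkpoint's config (T5Config for "t5-base") and
# resolves it through the seq2seq lazy mapping to FlaxT5ForConditionalGeneration.
tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = FlaxAutoModelForSeq2SeqLM.from_pretrained("t5-base")
inputs = tokenizer("translate English to German: Hello", return_tensors="np")
generated = model.generate(inputs["input_ids"])
print(tokenizer.batch_decode(generated.sequences, skip_special_tokens=True))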
340
"""simple docstring""" from __future__ import annotations def lowerCamelCase_( _lowerCamelCase ) -> bool: '''simple docstring''' _lowerCamelCase : int = str(_lowerCamelCase ) return len(_lowerCamelCase ) == 9 and set(_lowerCamelCase ) == set("123456789" ) def lowerCamelCase_( ) -> int | None: '''simple docstring''' for base_num in range(9999 , 4999 , -1 ): _lowerCamelCase : Union[str, Any] = 100002 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate for base_num in range(333 , 99 , -1 ): _lowerCamelCase : Tuple = 1002003 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate return None if __name__ == "__main__": print(f'''{solution() = }''')
340
1
"""simple docstring""" import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging _lowerCAmelCase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name class A_ ( _a ): def __init__( self: Tuple ,__lowerCAmelCase: AutoencoderKL ,__lowerCAmelCase: CLIPTextModel ,__lowerCAmelCase: CLIPTokenizer ,__lowerCAmelCase: UNetaDConditionModel ,__lowerCAmelCase: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] ,__lowerCAmelCase: StableDiffusionSafetyChecker ,__lowerCAmelCase: CLIPImageProcessor ,): '''simple docstring''' super().__init__() self.register_modules( vae=__lowerCAmelCase ,text_encoder=__lowerCAmelCase ,tokenizer=__lowerCAmelCase ,unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,safety_checker=__lowerCAmelCase ,feature_extractor=__lowerCAmelCase ,) def _lowercase ( self: Dict ,__lowerCAmelCase: Optional[Union[str, int]] = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _lowerCamelCase : Union[str, Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' self.enable_attention_slicing(__lowerCAmelCase ) @torch.no_grad() def __call__( self: Optional[Any] ,__lowerCAmelCase: Union[str, List[str]] ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 50 ,__lowerCAmelCase: float = 7.5 ,__lowerCAmelCase: Optional[Union[str, List[str]]] = None ,__lowerCAmelCase: Optional[int] = 1 ,__lowerCAmelCase: float = 0.0 ,__lowerCAmelCase: Optional[torch.Generator] = None ,__lowerCAmelCase: Optional[torch.FloatTensor] = None ,__lowerCAmelCase: Optional[str] = "pil" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: Optional[torch.FloatTensor] = None ,**__lowerCAmelCase: Optional[int] ,): '''simple docstring''' if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : str = 1 elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : List[Any] = len(__lowerCAmelCase ) else: raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(__lowerCAmelCase )}""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__lowerCAmelCase ,__lowerCAmelCase ) or callback_steps <= 0) ): raise ValueError( F"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" F""" {type(__lowerCAmelCase )}.""" ) # get prompt text embeddings _lowerCamelCase : Any = self.tokenizer( __lowerCAmelCase ,padding="max_length" ,max_length=self.tokenizer.model_max_length ,return_tensors="pt" ,) _lowerCamelCase : List[str] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: _lowerCamelCase : Optional[Any] = 
self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) _lowerCamelCase : List[str] = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: _lowerCamelCase : Dict = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = text_embeddings.shape _lowerCamelCase : Optional[Any] = text_embeddings.repeat(1 ,__lowerCAmelCase ,1 ) _lowerCamelCase : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt ,__lowerCAmelCase ,-1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. _lowerCamelCase : List[Any] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: _lowerCamelCase : List[str] if negative_prompt is None: _lowerCamelCase : Optional[Any] = [""] elif type(__lowerCAmelCase ) is not type(__lowerCAmelCase ): raise TypeError( F"""`negative_prompt` should be the same type to `prompt`, but got {type(__lowerCAmelCase )} !=""" F""" {type(__lowerCAmelCase )}.""" ) elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : List[str] = [negative_prompt] elif batch_size != len(__lowerCAmelCase ): raise ValueError( F"""`negative_prompt`: {negative_prompt} has batch size {len(__lowerCAmelCase )}, but `prompt`:""" F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" " the batch size of `prompt`." ) else: _lowerCamelCase : int = negative_prompt _lowerCamelCase : Tuple = text_input_ids.shape[-1] _lowerCamelCase : Optional[int] = self.tokenizer( __lowerCAmelCase ,padding="max_length" ,max_length=__lowerCAmelCase ,truncation=__lowerCAmelCase ,return_tensors="pt" ,) _lowerCamelCase : int = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method _lowerCamelCase : Optional[Any] = uncond_embeddings.shape[1] _lowerCamelCase : Any = uncond_embeddings.repeat(__lowerCAmelCase ,__lowerCAmelCase ,1 ) _lowerCamelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt ,__lowerCAmelCase ,-1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _lowerCamelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
_lowerCamelCase : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) _lowerCamelCase : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) _lowerCamelCase : Any = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps _lowerCamelCase : str = torch.randn( __lowerCAmelCase ,generator=__lowerCAmelCase ,device="cpu" ,dtype=__lowerCAmelCase ).to(self.device ) _lowerCamelCase : str = torch.randn(__lowerCAmelCase ,generator=__lowerCAmelCase ,device="cpu" ,dtype=__lowerCAmelCase ).to( self.device ) else: _lowerCamelCase : Any = torch.randn( __lowerCAmelCase ,generator=__lowerCAmelCase ,device=self.device ,dtype=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = torch.randn(__lowerCAmelCase ,generator=__lowerCAmelCase ,device=self.device ,dtype=__lowerCAmelCase ) else: if latents_reference.shape != latents_shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) _lowerCamelCase : List[str] = latents_reference.to(self.device ) _lowerCamelCase : Tuple = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images _lowerCamelCase : Tuple = (latents_shape[3] - latents_shape_reference[3]) // 2 _lowerCamelCase : Dict = (latents_shape[2] - latents_shape_reference[2]) // 2 _lowerCamelCase : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx _lowerCamelCase : Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy _lowerCamelCase : int = 0 if dx < 0 else dx _lowerCamelCase : Union[str, Any] = 0 if dy < 0 else dy _lowerCamelCase : str = max(-dx ,0 ) _lowerCamelCase : Union[str, Any] = max(-dy ,0 ) # import pdb # pdb.set_trace() _lowerCamelCase : Union[str, Any] = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(__lowerCAmelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand _lowerCamelCase : str = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler _lowerCamelCase : List[str] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _lowerCamelCase : Any = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _lowerCamelCase : str = {} if accepts_eta: _lowerCamelCase : str = eta for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ): # expand the latents if we are doing classifier free guidance _lowerCamelCase : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _lowerCamelCase : Optional[Any] = self.scheduler.scale_model_input(__lowerCAmelCase ,__lowerCAmelCase ) # predict the noise residual _lowerCamelCase : Optional[int] = self.unet(__lowerCAmelCase ,__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ).sample # perform guidance if do_classifier_free_guidance: _lowerCamelCase, _lowerCamelCase : Any = noise_pred.chunk(2 ) _lowerCamelCase : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 _lowerCamelCase : List[Any] = self.scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,**__lowerCAmelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : List[str] = 1 / 0.1_82_15 * latents _lowerCamelCase : List[str] = self.vae.decode(__lowerCAmelCase ).sample _lowerCamelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 ,1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 _lowerCamelCase : Tuple = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy() if self.safety_checker is not None: _lowerCamelCase : List[Any] = self.feature_extractor(self.numpy_to_pil(__lowerCAmelCase ) ,return_tensors="pt" ).to( self.device ) _lowerCamelCase, _lowerCamelCase : List[str] = self.safety_checker( images=__lowerCAmelCase ,clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: _lowerCamelCase : List[Any] = None if output_type == "pil": _lowerCamelCase : Optional[int] = self.numpy_to_pil(__lowerCAmelCase ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=__lowerCAmelCase ,nsfw_content_detected=__lowerCAmelCase )
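The guidance update inside the denoising loop above is the heart of the pipeline. A minimal sketch with a stand-in tensor in place of the real UNet output:

import torch

# Stand-in for the UNet output on the doubled [uncond, text] batch.
noise_pred = torch.randn(2, 4, 64, 64)
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guidance_scale = 7.5  # the pipeline's default
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# guidance_scale == 1.0 reduces this to the plain text-conditioned prediction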
340
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class A_ ( _a ): lowerCAmelCase__ = 'char' lowerCAmelCase__ = 'bpe' lowerCAmelCase__ = 'wp' _lowerCAmelCase : List[str] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class A_ ( _a ): lowerCAmelCase__ = ['image_processor', 'char_tokenizer'] lowerCAmelCase__ = 'ViTImageProcessor' lowerCAmelCase__ = 'MgpstrTokenizer' def __init__( self: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Optional[int]=None ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Any = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." ,__lowerCAmelCase ,) _lowerCamelCase : List[Any] = kwargs.pop("feature_extractor" ) _lowerCamelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) _lowerCamelCase : List[str] = tokenizer _lowerCamelCase : str = AutoTokenizer.from_pretrained("gpt2" ) _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(__lowerCAmelCase ,__lowerCAmelCase ) def __call__( self: Optional[int] ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[Any]=None ,**__lowerCAmelCase: Tuple ): '''simple docstring''' if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." 
) if images is not None: _lowerCamelCase : Optional[int] = self.image_processor(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is not None: _lowerCamelCase : int = self.char_tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is None: return inputs elif images is None: return encodings else: _lowerCamelCase : Tuple = encodings["input_ids"] return inputs def _lowercase ( self: int ,__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = sequences _lowerCamelCase : Dict = char_preds.size(0 ) _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._decode_helper(__lowerCAmelCase ,"char" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = self._decode_helper(__lowerCAmelCase ,"bpe" ) _lowerCamelCase, _lowerCamelCase : Tuple = self._decode_helper(__lowerCAmelCase ,"wp" ) _lowerCamelCase : List[str] = [] _lowerCamelCase : str = [] for i in range(__lowerCAmelCase ): _lowerCamelCase : str = [char_scores[i], bpe_scores[i], wp_scores[i]] _lowerCamelCase : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]] _lowerCamelCase : Optional[Any] = scores.index(max(__lowerCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _lowerCamelCase : Tuple = {} _lowerCamelCase : Tuple = final_strs _lowerCamelCase : int = final_scores _lowerCamelCase : str = char_strs _lowerCamelCase : Dict = bpe_strs _lowerCamelCase : int = wp_strs return out def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ): '''simple docstring''' if format == DecodeType.CHARACTER: _lowerCamelCase : int = self.char_decode _lowerCamelCase : List[str] = 1 _lowerCamelCase : Optional[int] = "[s]" elif format == DecodeType.BPE: _lowerCamelCase : Dict = self.bpe_decode _lowerCamelCase : str = 2 _lowerCamelCase : Union[str, Any] = "#" elif format == DecodeType.WORDPIECE: _lowerCamelCase : int = self.wp_decode _lowerCamelCase : List[str] = 102 _lowerCamelCase : List[Any] = "[SEP]" else: raise ValueError(F"""Format {format} is not supported.""" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = [], [] _lowerCamelCase : Any = pred_logits.size(0 ) _lowerCamelCase : int = pred_logits.size(1 ) _lowerCamelCase, _lowerCamelCase : List[Any] = pred_logits.topk(1 ,dim=-1 ,largest=__lowerCAmelCase ,sorted=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_index.view(-1 ,__lowerCAmelCase )[:, 1:] _lowerCamelCase : List[str] = decoder(__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase : str = torch.nn.functional.softmax(__lowerCAmelCase ,dim=2 ).max(dim=2 ) _lowerCamelCase : Any = preds_max_prob[:, 1:] for index in range(__lowerCAmelCase ): _lowerCamelCase : List[Any] = preds_str[index].find(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_str[index][:pred_eos] _lowerCamelCase : Optional[Any] = preds_index[index].cpu().tolist() _lowerCamelCase : List[str] = pred_index.index(__lowerCAmelCase ) if eos_token in pred_index else -1 _lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1] _lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__lowerCAmelCase ) conf_scores.append(__lowerCAmelCase ) return dec_strs, conf_scores def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : str = [seq.replace(" " ,"" ) for seq in self.char_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs def _lowercase ( 
self: List[str] ,__lowerCAmelCase: List[str] ):
        '''simple docstring'''
        return self.bpe_tokenizer.batch_decode(__lowerCAmelCase )

    def _lowercase ( self: Tuple ,__lowerCAmelCase: Optional[int] ):
        '''simple docstring'''
        _lowerCamelCase : Union[str, Any] = [seq.replace(" " ,"" ) for seq in self.wp_tokenizer.batch_decode(__lowerCAmelCase )]
        return decode_strs
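The three-way decode above reduces to a best-of-three vote per sample; a toy illustration with hypothetical strings and confidences:

# Hypothetical per-sample outputs from the char / bpe / wordpiece heads.
strs = ["ticket", "tick et", "ticket"]
scores = [0.91, 0.74, 0.88]
best = strs[scores.index(max(scores))]  # "ticket", from the char head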
340
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : Tuple = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = torch.device('''cpu''') def lowerCamelCase_( ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCamelCase : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return im def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Any = dct.pop(_lowerCamelCase ) _lowerCamelCase : Any = val def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : List[Any] = [] for k in state_dict.keys(): _lowerCamelCase : List[str] = k if ".pwconv" in k: _lowerCamelCase : int = k_new.replace(".pwconv" , ".point_wise_conv" ) if ".dwconv" in k: _lowerCamelCase : Optional[int] = k_new.replace(".dwconv" , ".depth_wise_conv" ) if ".Proj." in k: _lowerCamelCase : Tuple = k_new.replace(".Proj." , ".proj." ) if "patch_embed" in k_new: _lowerCamelCase : Optional[Any] = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" ) if "network" in k_new: _lowerCamelCase : List[str] = k_new.split("." ) if ls[2].isdigit(): _lowerCamelCase : str = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." 
+ ".".join(ls[3:] ) else: _lowerCamelCase : Any = k_new.replace("network" , "swiftformer.encoder.network" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : int = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size _lowerCamelCase : int = 1000 _lowerCamelCase : Optional[Any] = "huggingface/label-files" _lowerCamelCase : Union[str, Any] = "imagenet-1k-id2label.json" _lowerCamelCase : Union[str, Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _lowerCamelCase : Union[str, Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase : Optional[Any] = idalabel _lowerCamelCase : str = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": _lowerCamelCase : Optional[Any] = [3, 3, 6, 4] _lowerCamelCase : Union[str, Any] = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": _lowerCamelCase : Optional[Any] = [3, 3, 9, 6] _lowerCamelCase : Any = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": _lowerCamelCase : Optional[Any] = [4, 3, 10, 5] _lowerCamelCase : Dict = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": _lowerCamelCase : int = [4, 4, 12, 6] _lowerCamelCase : int = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("https" ): _lowerCamelCase : Union[str, Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" , check_hash=_lowerCamelCase ) else: _lowerCamelCase : str = torch.load(_lowerCamelCase , map_location="cpu" ) _lowerCamelCase : Optional[int] = checkpoint _lowerCamelCase : Optional[Any] = create_rename_keys(_lowerCamelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # load HuggingFace model _lowerCamelCase : Optional[int] = SwiftFormerForImageClassification(_lowerCamelCase ).eval() hf_model.load_state_dict(_lowerCamelCase ) # prepare test inputs _lowerCamelCase : Dict = prepare_img() _lowerCamelCase : List[Any] = ViTImageProcessor.from_pretrained("preprocessor_config" ) _lowerCamelCase : Optional[int] = processor(images=_lowerCamelCase , return_tensors="pt" ) # compare outputs from both models _lowerCamelCase : int = get_expected_output(_lowerCamelCase ) _lowerCamelCase : List[Any] = hf_model(inputs["pixel_values"] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , _lowerCamelCase , atol=1e-3 ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') 
    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
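The checkpoint conversion is driven by plain string rewrites; a tiny check of one create_rename_keys rule, on a hypothetical original key:

# Hypothetical checkpoint key; mirrors the ".pwconv" rule above.
k = "network.0.1.pwconv1.weight"
assert k.replace(".pwconv", ".point_wise_conv") == "network.0.1.point_wise_conv1.weight"
# "network.*" keys are then re-rooted under "swiftformer.encoder.network.<stage>.blocks.<block>"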
340
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version _lowerCAmelCase : List[Any] = get_logger(__name__) class A_ : lowerCAmelCase__ = 'dummy_data' lowerCAmelCase__ = 'datasets' lowerCAmelCase__ = False def __init__( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[Version, str] ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[List[Callable]] = None ,): '''simple docstring''' _lowerCamelCase : str = 0 _lowerCamelCase : List[str] = dataset_name _lowerCamelCase : Optional[int] = cache_dir _lowerCamelCase : Optional[int] = use_local_dummy_data _lowerCamelCase : int = config # download_callbacks take a single url as input _lowerCamelCase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _lowerCamelCase : int = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _lowerCamelCase : Tuple = str(__lowerCAmelCase ) # to be downloaded _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Dict = None @property def _lowercase ( self: str ): '''simple docstring''' if self._dummy_file is None: _lowerCamelCase : List[str] = self.download_dummy_data() return self._dummy_file @property def _lowercase ( self: str ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" ,self.config.name ,self.version_name ) # structure is dummy / version_name return os.path.join("dummy" ,self.version_name ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' return os.path.join(self.dummy_data_folder ,"dummy_data.zip" ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Dict = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _lowerCamelCase : Optional[int] = cached_path( __lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=__lowerCAmelCase ,force_extract=__lowerCAmelCase ) return os.path.join(__lowerCAmelCase ,self.dummy_file_name ) @property def _lowercase ( self: Tuple ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file ) @property def _lowercase ( self: List[str] ): '''simple docstring''' if self._bucket_url is None: _lowerCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) ) return self._bucket_url @property def _lowercase ( self: Union[str, Any] ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _lowerCamelCase : Tuple = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _lowerCamelCase : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): return self.create_dummy_data_dict(__lowerCAmelCase ,__lowerCAmelCase ) elif isinstance(__lowerCAmelCase ,(list, tuple) ): return self.create_dummy_data_list(__lowerCAmelCase ,__lowerCAmelCase ) else: return self.create_dummy_data_single(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: Optional[int] ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' return path def _lowercase ( self: Optional[int] ): '''simple docstring''' return {} def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : str = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): for single_url in single_urls: download_callback(__lowerCAmelCase ) else: _lowerCamelCase : Union[str, Any] = single_urls download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : Dict = [os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) for x in single_urls] else: _lowerCamelCase : Union[str, Any] = single_urls _lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) _lowerCamelCase : List[Any] = value # make sure that values are unique if all(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Dict = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _lowerCamelCase : List[str] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,__lowerCAmelCase ) ) for url in data_url ) _lowerCamelCase : Optional[Any] = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _lowerCamelCase : Tuple = [data_url[0]] * len(__lowerCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the 
url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__lowerCAmelCase ) return dummy_data_list def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__lowerCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _lowercase ( self: Optional[Any] ): '''simple docstring''' pass def _lowercase ( self: Optional[int] ): '''simple docstring''' pass def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' def _iter_archive_members(__lowerCAmelCase: Any ): # this preserves the order of the members inside the ZIP archive _lowerCamelCase : Tuple = Path(self.dummy_file ).parent _lowerCamelCase : str = path.relative_to(__lowerCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _lowerCamelCase : Optional[int] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = Path(__lowerCAmelCase ) _lowerCamelCase : int = _iter_archive_members(__lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__lowerCAmelCase ).as_posix(), file_path.open("rb" ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : List[Any] = [paths] for path in paths: if os.path.isfile(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__lowerCAmelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
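Dummy file names are derived from the last URL path segment, percent-encoded with quote_plus so query strings stay file-system safe. A small illustration with a hypothetical URL:

import os
import urllib.parse

url = "https://example.com/files/train.csv?rev=2"  # hypothetical
name = urllib.parse.quote_plus(url.split("/")[-1])
print(os.path.join("dummy_data", name))  # dummy_data/train.csv%3Frev%3D2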
340
1
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed _lowerCAmelCase : Optional[Any] = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' if args.student_type == "roberta": _lowerCamelCase : Tuple = False elif args.student_type == "gpt2": _lowerCamelCase : int = False def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: '''simple docstring''' if args.student_type == "roberta": _lowerCamelCase : int = False def lowerCamelCase_( ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Tuple = argparse.ArgumentParser(description="Training" ) parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." ) parser.add_argument( "--dump_path" , type=_lowerCamelCase , required=_lowerCamelCase , help="The output directory (log, checkpoints, parameters, etc.)" ) parser.add_argument( "--data_file" , type=_lowerCamelCase , required=_lowerCamelCase , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , ) parser.add_argument( "--student_type" , type=_lowerCamelCase , choices=["distilbert", "roberta", "gpt2"] , required=_lowerCamelCase , help="The student type (DistilBERT, RoBERTa)." , ) parser.add_argument("--student_config" , type=_lowerCamelCase , required=_lowerCamelCase , help="Path to the student configuration." ) parser.add_argument( "--student_pretrained_weights" , default=_lowerCamelCase , type=_lowerCamelCase , help="Load student initialization checkpoint." ) parser.add_argument( "--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=_lowerCamelCase , help="Teacher type (BERT, RoBERTa)." 
) parser.add_argument("--teacher_name" , type=_lowerCamelCase , required=_lowerCamelCase , help="The teacher model." ) parser.add_argument("--temperature" , default=2.0 , type=_lowerCamelCase , help="Temperature for the softmax temperature." ) parser.add_argument( "--alpha_ce" , default=0.5 , type=_lowerCamelCase , help="Linear weight for the distillation loss. Must be >=0." ) parser.add_argument( "--alpha_mlm" , default=0.0 , type=_lowerCamelCase , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , ) parser.add_argument("--alpha_clm" , default=0.5 , type=_lowerCamelCase , help="Linear weight for the CLM loss. Must be >=0." ) parser.add_argument("--alpha_mse" , default=0.0 , type=_lowerCamelCase , help="Linear weight of the MSE loss. Must be >=0." ) parser.add_argument( "--alpha_cos" , default=0.0 , type=_lowerCamelCase , help="Linear weight of the cosine embedding loss. Must be >=0." ) parser.add_argument( "--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." ) parser.add_argument( "--mlm_mask_prop" , default=0.1_5 , type=_lowerCamelCase , help="Proportion of tokens for which we need to make a prediction." , ) parser.add_argument("--word_mask" , default=0.8 , type=_lowerCamelCase , help="Proportion of tokens to mask out." ) parser.add_argument("--word_keep" , default=0.1 , type=_lowerCamelCase , help="Proportion of tokens to keep." ) parser.add_argument("--word_rand" , default=0.1 , type=_lowerCamelCase , help="Proportion of tokens to randomly replace." ) parser.add_argument( "--mlm_smoothing" , default=0.7 , type=_lowerCamelCase , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , ) parser.add_argument("--token_counts" , type=_lowerCamelCase , help="The token counts in the data_file for MLM." ) parser.add_argument( "--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , ) parser.add_argument( "--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , ) parser.add_argument( "--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , ) parser.add_argument("--n_epoch" , type=_lowerCamelCase , default=3 , help="Number of pass on the whole dataset." ) parser.add_argument("--batch_size" , type=_lowerCamelCase , default=5 , help="Batch size (for each process)." ) parser.add_argument( "--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , ) parser.add_argument( "--gradient_accumulation_steps" , type=_lowerCamelCase , default=50 , help="Gradient accumulation for larger training batches." , ) parser.add_argument("--warmup_prop" , default=0.0_5 , type=_lowerCamelCase , help="Linear warmup proportion." ) parser.add_argument("--weight_decay" , default=0.0 , type=_lowerCamelCase , help="Weight decay if we apply some." ) parser.add_argument("--learning_rate" , default=5e-4 , type=_lowerCamelCase , help="The initial learning rate for Adam." ) parser.add_argument("--adam_epsilon" , default=1e-6 , type=_lowerCamelCase , help="Epsilon for Adam optimizer." ) parser.add_argument("--max_grad_norm" , default=5.0 , type=_lowerCamelCase , help="Max gradient norm." 
) parser.add_argument("--initializer_range" , default=0.0_2 , type=_lowerCamelCase , help="Random initialization range." ) parser.add_argument( "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , ) parser.add_argument( "--fp16_opt_level" , type=_lowerCamelCase , default="O1" , help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ) , ) parser.add_argument("--n_gpu" , type=_lowerCamelCase , default=1 , help="Number of GPUs in the node." ) parser.add_argument("--local_rank" , type=_lowerCamelCase , default=-1 , help="Distributed training - Local rank" ) parser.add_argument("--seed" , type=_lowerCamelCase , default=56 , help="Random seed" ) parser.add_argument("--log_interval" , type=_lowerCamelCase , default=500 , help="Tensorboard logging interval." ) parser.add_argument("--checkpoint_interval" , type=_lowerCamelCase , default=4000 , help="Checkpoint interval." ) _lowerCamelCase : str = parser.parse_args() sanity_checks(_lowerCamelCase ) # ARGS # init_gpu_params(_lowerCamelCase ) set_seed(_lowerCamelCase ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite""" " itUse `--force` if you want to overwrite it" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" ) # SAVE PARAMS # logger.info(F"""Param: {args}""" ) with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f: json.dump(vars(_lowerCamelCase ) , _lowerCamelCase , indent=4 ) git_log(args.dump_path ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = MODEL_CLASSES[args.student_type] _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = MODEL_CLASSES[args.teacher_type] # TOKENIZER # _lowerCamelCase : int = teacher_tokenizer_class.from_pretrained(args.teacher_name ) _lowerCamelCase : List[str] = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): _lowerCamelCase : Tuple = tokenizer.all_special_tokens.index(_lowerCamelCase ) _lowerCamelCase : int = tokenizer.all_special_ids[idx] logger.info(F"""Special tokens {special_tok_ids}""" ) _lowerCamelCase : Optional[Any] = special_tok_ids _lowerCamelCase : List[str] = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F"""Loading data from {args.data_file}""" ) with open(args.data_file , "rb" ) as fp: _lowerCamelCase : str = pickle.load(_lowerCamelCase ) if args.mlm: logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" ) with open(args.token_counts , "rb" ) as fp: _lowerCamelCase : str = pickle.load(_lowerCamelCase ) _lowerCamelCase : List[str] = np.maximum(_lowerCamelCase , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): _lowerCamelCase : Tuple = 0.0 # do not predict special tokens _lowerCamelCase : Any = torch.from_numpy(_lowerCamelCase ) else: _lowerCamelCase : str = None _lowerCamelCase : int = LmSeqsDataset(params=_lowerCamelCase , data=_lowerCamelCase ) logger.info("Data loader created." 
) # STUDENT # logger.info(F"""Loading student config from {args.student_config}""" ) _lowerCamelCase : Dict = student_config_class.from_pretrained(args.student_config ) _lowerCamelCase : Optional[int] = True if args.student_pretrained_weights is not None: logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" ) _lowerCamelCase : Union[str, Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=_lowerCamelCase ) else: _lowerCamelCase : Any = student_model_class(_lowerCamelCase ) if args.n_gpu > 0: student.to(F"""cuda:{args.local_rank}""" ) logger.info("Student loaded." ) # TEACHER # _lowerCamelCase : Any = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_lowerCamelCase ) if args.n_gpu > 0: teacher.to(F"""cuda:{args.local_rank}""" ) logger.info(F"""Teacher loaded from {args.teacher_name}.""" ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_lowerCamelCase , _lowerCamelCase ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_lowerCamelCase , _lowerCamelCase ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() _lowerCamelCase : Union[str, Any] = Distiller( params=_lowerCamelCase , dataset=_lowerCamelCase , token_probs=_lowerCamelCase , student=_lowerCamelCase , teacher=_lowerCamelCase ) distiller.train() logger.info("Let's go get some drinks." ) if __name__ == "__main__": main()
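The MLM smoothing step turns raw token counts into masking weights through a negative power, boosting rare tokens; a numeric sketch with made-up counts:

import numpy as np

counts = np.array([1_000_000, 10_000, 100, 0])  # made-up corpus counts
token_probs = np.maximum(counts, 1) ** -0.7  # 0.7 is the script's default --mlm_smoothing
# a token 100x rarer gets only ~25x the weight: 100 ** 0.7 ~= 25.1
print(token_probs[2] / token_probs[1])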
340
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise TypeError("Undefined for non-integers" ) elif precision < 1: raise ValueError("Undefined for non-natural numbers" ) _lowerCamelCase : int = precision _lowerCamelCase : Dict = ceil(precision / 14 ) _lowerCamelCase : Optional[Any] = 426880 * Decimal(10005 ).sqrt() _lowerCamelCase : int = 1 _lowerCamelCase : Optional[int] = 13591409 _lowerCamelCase : int = Decimal(_lowerCamelCase ) for k in range(1 , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowerCamelCase ) ** 3) linear_term += 545140134 exponential_term *= -262537412640768000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = 50 print(f'''The first {n} digits of pi is: {pi(n)}''')
340
1
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase ) -> bool: '''simple docstring''' if num < 0: return False _lowerCamelCase : int = num _lowerCamelCase : int = 0 while num > 0: _lowerCamelCase : List[Any] = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
340
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class A_ ( _a ): lowerCAmelCase__ = 42 lowerCAmelCase__ = None def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=0.9_9_9 , _lowerCamelCase="cosine" , ) -> List[str]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(_lowerCamelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_lowerCamelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) _lowerCamelCase : str = [] for i in range(_lowerCamelCase ): _lowerCamelCase : Any = i / num_diffusion_timesteps _lowerCamelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) ) return torch.tensor(_lowerCamelCase , dtype=torch.floataa ) class A_ ( _a , _a ): @register_to_config def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,): '''simple docstring''' if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) _lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = 1.0 - self.betas _lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 ) _lowerCamelCase : int = torch.tensor(1.0 ) # standard deviation of the initial noise distribution _lowerCamelCase : Tuple = 1.0 # setable values _lowerCamelCase : List[Any] = None _lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() ) _lowerCamelCase : List[str] = variance_type def _lowercase ( self: Any ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ): '''simple docstring''' return sample def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, torch.device] = None ): '''simple docstring''' _lowerCamelCase : str = num_inference_steps _lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) _lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) _lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: str=None ): '''simple docstring''' if prev_timestep is None: _lowerCamelCase : List[str] = t - 1 _lowerCamelCase : Optional[int] = self.alphas_cumprod[t] _lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : List[Any] = self.betas[t] else: _lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from 
https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample _lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: _lowerCamelCase : List[str] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": _lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) ) _lowerCamelCase : str = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler _lowerCamelCase : str = variance.log() _lowerCamelCase : str = beta.log() _lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2 _lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: bool = True ,): '''simple docstring''' _lowerCamelCase : str = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": _lowerCamelCase, _lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 ) else: _lowerCamelCase : List[Any] = None # 1. compute alphas, betas if prev_timestep is None: _lowerCamelCase : List[Any] = t - 1 _lowerCamelCase : Dict = self.alphas_cumprod[t] _lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : List[str] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : Any = self.betas[t] _lowerCamelCase : str = self.alphas[t] else: _lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev _lowerCamelCase : Optional[Any] = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": _lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": _lowerCamelCase : List[Any] = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: _lowerCamelCase : Any = torch.clamp( __lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t _lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _lowerCamelCase : Union[str, Any] = 0 if t > 0: _lowerCamelCase : Dict = randn_tensor( model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device ) _lowerCamelCase : Any = self._get_variance( __lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,) if self.variance_type == "fixed_small_log": _lowerCamelCase : Optional[Any] = variance elif self.variance_type == "learned_range": _lowerCamelCase : Optional[int] = (0.5 * variance).exp() else: raise ValueError( F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" " for the UnCLIPScheduler." ) _lowerCamelCase : Dict = variance * variance_noise _lowerCamelCase : List[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.IntTensor ,): '''simple docstring''' _lowerCamelCase : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype ) _lowerCamelCase : Any = timesteps.to(original_samples.device ) _lowerCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5 _lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : int = sqrt_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5 _lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
340
1
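For reference, the posterior quantities assembled in the UnCLIP scheduler row above are formulas (6) and (7) of Ho et al. (2020), https://arxiv.org/abs/2006.11239. Writing \(\bar{\alpha}_t\) for the cumulative product of the \(\alpha_t\):

\[
\tilde{\beta}_t = \frac{1 - \bar{\alpha}_{t-1}}{1 - \bar{\alpha}_t}\,\beta_t,
\qquad
\tilde{\mu}_t(x_t, x_0) = \frac{\sqrt{\bar{\alpha}_{t-1}}\,\beta_t}{1 - \bar{\alpha}_t}\,x_0
 + \frac{\sqrt{\alpha_t}\,\bigl(1 - \bar{\alpha}_{t-1}\bigr)}{1 - \bar{\alpha}_t}\,x_t
\]

These correspond, respectively, to the `beta_prod_t_prev / beta_prod_t * beta` variance, the `pred_original_sample_coeff`, and the `current_sample_coeff` computed in the step function.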
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCAmelCase : str = { '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''], '''tokenization_electra''': ['''ElectraTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Optional[Any] = ['''ElectraTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Union[str, Any] = [ '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ElectraForCausalLM''', '''ElectraForMaskedLM''', '''ElectraForMultipleChoice''', '''ElectraForPreTraining''', '''ElectraForQuestionAnswering''', '''ElectraForSequenceClassification''', '''ElectraForTokenClassification''', '''ElectraModel''', '''ElectraPreTrainedModel''', '''load_tf_weights_in_electra''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : str = [ '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFElectraForMaskedLM''', '''TFElectraForMultipleChoice''', '''TFElectraForPreTraining''', '''TFElectraForQuestionAnswering''', '''TFElectraForSequenceClassification''', '''TFElectraForTokenClassification''', '''TFElectraModel''', '''TFElectraPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Optional[int] = [ '''FlaxElectraForCausalLM''', '''FlaxElectraForMaskedLM''', '''FlaxElectraForMultipleChoice''', '''FlaxElectraForPreTraining''', '''FlaxElectraForQuestionAnswering''', '''FlaxElectraForSequenceClassification''', '''FlaxElectraForTokenClassification''', '''FlaxElectraModel''', '''FlaxElectraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, 
FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys _lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Dict = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : str = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' for attribute in key.split("." ): _lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: _lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: _lowerCamelCase : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowerCamelCase : Tuple = value elif weight_type == "weight_g": _lowerCamelCase : List[str] = value elif weight_type == "weight_v": _lowerCamelCase : List[Any] = value elif weight_type == "bias": _lowerCamelCase : str = value elif weight_type == "running_mean": _lowerCamelCase : Optional[int] = value elif weight_type == "running_var": _lowerCamelCase : Optional[Any] = value elif weight_type == "num_batches_tracked": _lowerCamelCase : int = value elif weight_type == "inv_freq": _lowerCamelCase : List[str] = value else: _lowerCamelCase : Optional[Any] = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Dict = [] _lowerCamelCase : Optional[Any] = fairseq_model.state_dict() _lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): _lowerCamelCase : Dict = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , ) _lowerCamelCase : List[Any] = True else: for key, mapped_key in MAPPING.items(): _lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _lowerCamelCase : int = True if "*" in mapped_key: _lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2] _lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase ) if "pos_bias_u" in name: _lowerCamelCase : int = None elif "pos_bias_v" in name: _lowerCamelCase : Any = None elif "weight_g" in name: _lowerCamelCase : Any = "weight_g" elif "weight_v" in name: _lowerCamelCase : Any = "weight_v" elif "bias" in name: _lowerCamelCase : Optional[Any] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj _lowerCamelCase : Dict = "weight" elif "running_mean" in name: _lowerCamelCase : str = "running_mean" elif "inv_freq" in name: _lowerCamelCase : List[Any] = "inv_freq" elif "running_var" in name: _lowerCamelCase : Tuple = "running_var" elif "num_batches_tracked" in name: _lowerCamelCase : str = "num_batches_tracked" else: _lowerCamelCase : Dict = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : int = full_name.split("conv_layers." )[-1] _lowerCamelCase : List[Any] = name.split("." 
) _lowerCamelCase : Union[str, Any] = int(items[0] ) _lowerCamelCase : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowerCamelCase : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowerCamelCase : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) _lowerCamelCase : Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowerCamelCase : Optional[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCamelCase ) @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Dict: '''simple docstring''' if config_path is not None: _lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" ) else: _lowerCamelCase : Dict = WavaVecaConformerConfig() if "rope" in checkpoint_path: _lowerCamelCase : List[Any] = "rotary" if is_finetuned: if dict_path: _lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowerCamelCase : Optional[int] = target_dict.pad_index _lowerCamelCase : Dict = target_dict.bos_index _lowerCamelCase : Optional[Any] = target_dict.eos_index _lowerCamelCase : str = len(target_dict.symbols ) _lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" ) if not os.path.isdir(_lowerCamelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) _lowerCamelCase : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched _lowerCamelCase : List[str] = 0 _lowerCamelCase : List[Any] = 1 with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , ) _lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False _lowerCamelCase : 
Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) _lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) _lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase ) else: _lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase ) if is_finetuned: _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" ) _lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase ) _lowerCamelCase : Dict = model[0].eval() recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned ) hf_wavavec.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) _lowerCAmelCase : str = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
340
1
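The conversion script above repeatedly resolves dotted fairseq keys onto Hugging Face module attributes before copying weights. A minimal sketch of that pattern, with a hypothetical helper name and a plain torch module tree:

import torch

def assign_by_dotted_key(root: torch.nn.Module, dotted_key: str, value: torch.Tensor) -> None:
    # Walk e.g. "encoder.layers.0.ffn1.intermediate_dense.weight" down to the leaf
    # attribute (string indices like "0" resolve into nn.ModuleList children),
    # check shapes, then copy the converted checkpoint tensor in place.
    *parents, leaf = dotted_key.split(".")
    obj = root
    for attr in parents:
        obj = getattr(obj, attr)
    target = getattr(obj, leaf)
    if target.shape != value.shape:
        raise ValueError(f"{dotted_key}: expected {tuple(target.shape)}, got {tuple(value.shape)}")
    target.data.copy_(value)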
"""simple docstring""" import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' return 1.0 / (1.0 + np.exp(-_outputs )) def lowerCamelCase_( _lowerCamelCase ) -> Any: '''simple docstring''' _lowerCamelCase : Dict = np.max(_outputs , axis=-1 , keepdims=_lowerCamelCase ) _lowerCamelCase : List[str] = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_lowerCamelCase ) class A_ ( _a ): lowerCAmelCase__ = 'sigmoid' lowerCAmelCase__ = 'softmax' lowerCAmelCase__ = 'none' @add_end_docstrings( _a , r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , ) class A_ ( _a ): lowerCAmelCase__ = False lowerCAmelCase__ = ClassificationFunction.NONE def __init__( self: Tuple ,**__lowerCAmelCase: Any ): '''simple docstring''' super().__init__(**__lowerCAmelCase ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def _lowercase ( self: Optional[int] ,__lowerCAmelCase: str=None ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Tuple="" ,**__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase : Optional[Any] = tokenizer_kwargs _lowerCamelCase : Optional[int] = {} if hasattr(self.model.config ,"return_all_scores" ) and return_all_scores is None: _lowerCamelCase : List[Any] = self.model.config.return_all_scores if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) or top_k is None: _lowerCamelCase : Tuple = top_k _lowerCamelCase : Any = False elif return_all_scores is not None: warnings.warn( "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of" " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." ,__lowerCAmelCase ,) if return_all_scores: _lowerCamelCase : Optional[Any] = None else: _lowerCamelCase : Tuple = 1 if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : List[str] = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: _lowerCamelCase : Optional[Any] = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self: int ,*__lowerCAmelCase: Dict ,**__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : Dict = super().__call__(*__lowerCAmelCase ,**__lowerCAmelCase ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
_lowerCamelCase : Optional[int] = "top_k" not in kwargs if isinstance(args[0] ,__lowerCAmelCase ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def _lowercase ( self: int ,__lowerCAmelCase: List[Any] ,**__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : Dict = self.framework if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): return self.tokenizer(**__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) == 1 and isinstance(inputs[0] ,__lowerCAmelCase ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] ,text_pair=inputs[0][1] ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a" " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." ) return self.tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) def _lowercase ( self: Any ,__lowerCAmelCase: Dict ): '''simple docstring''' return self.model(**__lowerCAmelCase ) def _lowercase ( self: Any ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Any=1 ,__lowerCAmelCase: Optional[int]=True ): '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: _lowerCamelCase : Dict = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: _lowerCamelCase : Union[str, Any] = ClassificationFunction.SOFTMAX elif hasattr(self.model.config ,"function_to_apply" ) and function_to_apply is None: _lowerCamelCase : List[Any] = self.model.config.function_to_apply else: _lowerCamelCase : List[str] = ClassificationFunction.NONE _lowerCamelCase : Optional[Any] = model_outputs["logits"][0] _lowerCamelCase : Tuple = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: _lowerCamelCase : Optional[int] = sigmoid(__lowerCAmelCase ) elif function_to_apply == ClassificationFunction.SOFTMAX: _lowerCamelCase : List[Any] = softmax(__lowerCAmelCase ) elif function_to_apply == ClassificationFunction.NONE: _lowerCamelCase : List[Any] = outputs else: raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""" ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} _lowerCamelCase : Optional[int] = [ {"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(__lowerCAmelCase ) ] if not _legacy: dict_scores.sort(key=lambda __lowerCAmelCase : x["score"] ,reverse=__lowerCAmelCase ) if top_k is not None: _lowerCamelCase : Optional[int] = dict_scores[:top_k] return dict_scores
340
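In the pipeline file above, the softmax helper lost its parameter name and its `keepdims=True` arguments to the renaming pass (the body references `_outputs` while the signature does not define it). Written out conventionally, the numerically stable softmax it implements is:

import numpy as np

def stable_softmax(logits: np.ndarray) -> np.ndarray:
    # Subtracting the row-wise max before exponentiating avoids overflow;
    # the result is mathematically identical to softmax(logits).
    maxes = np.max(logits, axis=-1, keepdims=True)
    shifted_exp = np.exp(logits - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)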
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(a - b ) for a, b in zip(_lowerCamelCase , _lowerCamelCase ) ) ) def lowerCamelCase_( _lowerCamelCase ) -> None: '''simple docstring''' if point: if isinstance(_lowerCamelCase , _lowerCamelCase ): for item in point: if not isinstance(_lowerCamelCase , (int, float) ): _lowerCamelCase : Dict = ( "Expected a list of numbers as input, found " F"""{type(_lowerCamelCase ).__name__}""" ) raise TypeError(_lowerCamelCase ) else: _lowerCamelCase : Optional[int] = F"""Expected a list of numbers as input, found {type(_lowerCamelCase ).__name__}""" raise TypeError(_lowerCamelCase ) else: raise ValueError("Missing an input" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(x - y ) for x, y in zip(_lowerCamelCase , _lowerCamelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
340
1
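A quick hand-checked example for the Manhattan distance functions above (values chosen by hand):

# |1 - 2| + |1 - 2| = 2.0; |1.5 - 2| + |1.5 - 2| = 1.0
assert manhattan_distance([1, 1], [2, 2]) == 2.0
assert manhattan_distance([1.5, 1.5], [2, 2]) == 1.0
assert manhattan_distance_one_liner([1, 1], [2, 2]) == 2.0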
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Optional[Any] = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Optional[Any] = np.inf def set_batch_size(_lowerCamelCase ) -> None: nonlocal batch_size if isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Optional[int] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ) and feature.dtype == "binary": _lowerCamelCase : List[str] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_lowerCamelCase , _lowerCamelCase ) return None if batch_size is np.inf else batch_size class A_ ( _a ): def __init__( self: Optional[int] ,__lowerCAmelCase: NestedDataStructureLike[PathLike] ,__lowerCAmelCase: Optional[NamedSplit] = None ,__lowerCAmelCase: Optional[Features] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: int ,): '''simple docstring''' super().__init__( __lowerCAmelCase ,split=__lowerCAmelCase ,features=__lowerCAmelCase ,cache_dir=__lowerCAmelCase ,keep_in_memory=__lowerCAmelCase ,streaming=__lowerCAmelCase ,num_proc=__lowerCAmelCase ,**__lowerCAmelCase ,) _lowerCamelCase : Tuple = path_or_paths if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else {self.split: path_or_paths} _lowerCamelCase : Any = _PACKAGED_DATASETS_MODULES["parquet"][1] _lowerCamelCase : int = Parquet( cache_dir=__lowerCAmelCase ,data_files=__lowerCAmelCase ,features=__lowerCAmelCase ,hash=__lowerCAmelCase ,**__lowerCAmelCase ,) def _lowercase ( self: Optional[int] ): '''simple docstring''' if self.streaming: _lowerCamelCase : List[Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _lowerCamelCase : Tuple = None _lowerCamelCase : Optional[int] = None _lowerCamelCase : List[str] = None _lowerCamelCase : str = None self.builder.download_and_prepare( download_config=__lowerCAmelCase ,download_mode=__lowerCAmelCase ,verification_mode=__lowerCAmelCase ,base_path=__lowerCAmelCase ,num_proc=self.num_proc ,) _lowerCamelCase : Any = self.builder.as_dataset( split=self.split ,verification_mode=__lowerCAmelCase ,in_memory=self.keep_in_memory ) return dataset class A_ : def __init__( self: str ,__lowerCAmelCase: Dataset ,__lowerCAmelCase: Union[PathLike, BinaryIO] ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: List[Any] ,): '''simple docstring''' _lowerCamelCase : Any = dataset _lowerCamelCase : Any = path_or_buf _lowerCamelCase : Any = batch_size or get_writer_batch_size(dataset.features ) _lowerCamelCase : List[str] = parquet_writer_kwargs def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf ,(str, 
bytes, os.PathLike) ): with open(self.path_or_buf ,"wb+" ) as buffer: _lowerCamelCase : str = self._write(file_obj=__lowerCAmelCase ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) else: _lowerCamelCase : Optional[int] = self._write(file_obj=self.path_or_buf ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) return written def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: BinaryIO ,__lowerCAmelCase: int ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : List[str] = 0 _lowerCamelCase : Optional[int] = parquet_writer_kwargs.pop("path_or_buf" ,__lowerCAmelCase ) _lowerCamelCase : List[str] = self.dataset.features.arrow_schema _lowerCamelCase : str = pq.ParquetWriter(__lowerCAmelCase ,schema=__lowerCAmelCase ,**__lowerCAmelCase ) for offset in logging.tqdm( range(0 ,len(self.dataset ) ,__lowerCAmelCase ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating parquet from Arrow format" ,): _lowerCamelCase : List[str] = query_table( table=self.dataset._data ,key=slice(__lowerCAmelCase ,offset + batch_size ) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,) writer.write_table(__lowerCAmelCase ) written += batch.nbytes writer.close() return written
340
1
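The Parquet writer above streams the dataset to disk in row-group sized slices. A self-contained sketch of the same pyarrow pattern (the file path and batch size are illustrative):

import pyarrow as pa
import pyarrow.parquet as pq

def write_in_batches(table: pa.Table, path: str, batch_size: int = 1000) -> int:
    # Stream an Arrow table to Parquet one slice at a time, mirroring the
    # writer loop above; returns the (approximate) number of bytes written.
    written = 0
    writer = pq.ParquetWriter(path, schema=table.schema)
    for offset in range(0, table.num_rows, batch_size):
        batch = table.slice(offset, batch_size)
        writer.write_table(batch)
        written += batch.nbytes
    writer.close()
    return written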
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class A_ ( _a ): lowerCAmelCase__ = 'char' lowerCAmelCase__ = 'bpe' lowerCAmelCase__ = 'wp' _lowerCAmelCase : List[str] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class A_ ( _a ): lowerCAmelCase__ = ['image_processor', 'char_tokenizer'] lowerCAmelCase__ = 'ViTImageProcessor' lowerCAmelCase__ = 'MgpstrTokenizer' def __init__( self: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Optional[int]=None ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Any = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." ,__lowerCAmelCase ,) _lowerCamelCase : List[Any] = kwargs.pop("feature_extractor" ) _lowerCamelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) _lowerCamelCase : List[str] = tokenizer _lowerCamelCase : str = AutoTokenizer.from_pretrained("gpt2" ) _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(__lowerCAmelCase ,__lowerCAmelCase ) def __call__( self: Optional[int] ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[Any]=None ,**__lowerCAmelCase: Tuple ): '''simple docstring''' if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." 
) if images is not None: _lowerCamelCase : Optional[int] = self.image_processor(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is not None: _lowerCamelCase : int = self.char_tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is None: return inputs elif images is None: return encodings else: _lowerCamelCase : Tuple = encodings["input_ids"] return inputs def _lowercase ( self: int ,__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = sequences _lowerCamelCase : Dict = char_preds.size(0 ) _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._decode_helper(__lowerCAmelCase ,"char" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = self._decode_helper(__lowerCAmelCase ,"bpe" ) _lowerCamelCase, _lowerCamelCase : Tuple = self._decode_helper(__lowerCAmelCase ,"wp" ) _lowerCamelCase : List[str] = [] _lowerCamelCase : str = [] for i in range(__lowerCAmelCase ): _lowerCamelCase : str = [char_scores[i], bpe_scores[i], wp_scores[i]] _lowerCamelCase : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]] _lowerCamelCase : Optional[Any] = scores.index(max(__lowerCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _lowerCamelCase : Tuple = {} _lowerCamelCase : Tuple = final_strs _lowerCamelCase : int = final_scores _lowerCamelCase : str = char_strs _lowerCamelCase : Dict = bpe_strs _lowerCamelCase : int = wp_strs return out def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ): '''simple docstring''' if format == DecodeType.CHARACTER: _lowerCamelCase : int = self.char_decode _lowerCamelCase : List[str] = 1 _lowerCamelCase : Optional[int] = "[s]" elif format == DecodeType.BPE: _lowerCamelCase : Dict = self.bpe_decode _lowerCamelCase : str = 2 _lowerCamelCase : Union[str, Any] = "#" elif format == DecodeType.WORDPIECE: _lowerCamelCase : int = self.wp_decode _lowerCamelCase : List[str] = 102 _lowerCamelCase : List[Any] = "[SEP]" else: raise ValueError(F"""Format {format} is not supported.""" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = [], [] _lowerCamelCase : Any = pred_logits.size(0 ) _lowerCamelCase : int = pred_logits.size(1 ) _lowerCamelCase, _lowerCamelCase : List[Any] = pred_logits.topk(1 ,dim=-1 ,largest=__lowerCAmelCase ,sorted=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_index.view(-1 ,__lowerCAmelCase )[:, 1:] _lowerCamelCase : List[str] = decoder(__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase : str = torch.nn.functional.softmax(__lowerCAmelCase ,dim=2 ).max(dim=2 ) _lowerCamelCase : Any = preds_max_prob[:, 1:] for index in range(__lowerCAmelCase ): _lowerCamelCase : List[Any] = preds_str[index].find(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_str[index][:pred_eos] _lowerCamelCase : Optional[Any] = preds_index[index].cpu().tolist() _lowerCamelCase : List[str] = pred_index.index(__lowerCAmelCase ) if eos_token in pred_index else -1 _lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1] _lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__lowerCAmelCase ) conf_scores.append(__lowerCAmelCase ) return dec_strs, conf_scores def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : str = [seq.replace(" " ,"" ) for seq in self.char_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs def _lowercase ( 
self: List[str] ,__lowerCAmelCase: List[str] ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(__lowerCAmelCase ) def _lowercase ( self: Tuple ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = [seq.replace(" " ,"" ) for seq in self.wp_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs
340
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Optional[int] = {} _lowerCamelCase : Optional[int] = tokenizer(example["content"] , truncation=_lowerCamelCase )["input_ids"] _lowerCamelCase : Dict = len(example["content"] ) / len(output["input_ids"] ) return output _lowerCAmelCase : Tuple = HfArgumentParser(PretokenizationArguments) _lowerCAmelCase : Optional[int] = parser.parse_args() if args.num_workers is None: _lowerCAmelCase : Any = multiprocessing.cpu_count() _lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir) _lowerCAmelCase : Union[str, Any] = time.time() _lowerCAmelCase : Optional[int] = load_dataset(args.dataset_name, split='''train''') print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') _lowerCAmelCase : Any = time.time() _lowerCAmelCase : Dict = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ '''repo_name''', '''path''', '''copies''', '''size''', '''content''', '''license''', '''hash''', '''line_mean''', '''line_max''', '''alpha_frac''', '''autogenerated''', ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') _lowerCAmelCase : str = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
340
1
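The pretokenization script above is an instance of the usual `datasets.map` parallel-tokenization pattern. A minimal sketch with placeholder dataset and column names (only "gpt2" is a real model id; the rest are assumptions for illustration):

from datasets import load_dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")

def tokenize(example):
    # Tokenize one record and record the characters-per-token ratio.
    ids = tokenizer(example["content"], truncation=True)["input_ids"]
    return {"input_ids": ids, "ratio_char_token": len(example["content"]) / len(ids)}

ds = load_dataset("some/dataset", split="train")  # hypothetical dataset name
ds = ds.map(tokenize, num_proc=4, remove_columns=ds.column_names)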
"""simple docstring""" # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union _lowerCAmelCase : Tuple = re.compile(R'''^(?P<major>\d+)''' R'''\.(?P<minor>\d+)''' R'''\.(?P<patch>\d+)$''') @total_ordering @dataclass class A_ : lowerCAmelCase__ = 42 lowerCAmelCase__ = None lowerCAmelCase__ = None lowerCAmelCase__ = None lowerCAmelCase__ = None def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = _str_to_version_tuple(self.version_str ) def __repr__( self: List[str] ): '''simple docstring''' return F"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}""" @property def _lowercase ( self: Dict ): '''simple docstring''' return self.major, self.minor, self.patch def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): return Version(__lowerCAmelCase ) elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ): return other raise TypeError(F"""{other} (type {type(__lowerCAmelCase )}) cannot be compared to version.""" ) def __eq__( self: int ,__lowerCAmelCase: str ): '''simple docstring''' try: _lowerCamelCase : int = self._validate_operand(__lowerCAmelCase ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self: List[Any] ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : int = self._validate_operand(__lowerCAmelCase ) return self.tuple < other.tuple def __hash__( self: Optional[int] ): '''simple docstring''' return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def _lowercase ( cls: Optional[Any] ,__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase : Any = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def _lowercase ( self: int ): '''simple docstring''' return self.version_str def lowerCamelCase_( _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : List[str] = _VERSION_REG.match(_lowerCamelCase ) if not res: raise ValueError(F"""Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.""" ) return tuple(int(_lowerCamelCase ) for v in [res.group("major" ), res.group("minor" ), res.group("patch" )] ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' return ".".join(str(_lowerCamelCase ) for v in version_tuple )
340
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Optional[Any] = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
1
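The Version class above compares versions as integer tuples rather than strings, which matters once any component reaches two digits. A compact sketch of the same parsing:

import re

_VERSION_REG = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$")

def str_to_version_tuple(version_str: str) -> tuple[int, int, int]:
    # Parse "x.y.z" into a comparable tuple of ints, as the Version class does.
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version {version_str!r}; expected x.y.z")
    return tuple(int(res.group(g)) for g in ("major", "minor", "patch"))

# Tuple comparison gets this right; naive string comparison would not,
# since "2.9.0" > "2.10.0" lexicographically.
assert str_to_version_tuple("2.9.0") < str_to_version_tuple("2.10.0")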
"""simple docstring""" import math from datetime import datetime, timedelta def lowerCamelCase_( _lowerCamelCase ) -> datetime: '''simple docstring''' _lowerCamelCase : List[Any] = year % 19 _lowerCamelCase : Any = year % 4 _lowerCamelCase : Tuple = year % 7 _lowerCamelCase : int = math.floor(year / 100 ) _lowerCamelCase : str = math.floor((13 + 8 * leap_day_inhibits) / 25 ) _lowerCamelCase : str = leap_day_inhibits / 4 _lowerCamelCase : str = ( 15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number ) % 30 _lowerCamelCase : Any = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7 # days to be added to March 21 _lowerCamelCase : Any = (19 * metonic_cycle + secular_moon_shift) % 30 # PHM -> Paschal Full Moon _lowerCamelCase : Dict = ( 2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point ) % 7 if days_to_add == 29 and days_from_phm_to_sunday == 6: return datetime(_lowerCamelCase , 4 , 19 ) elif days_to_add == 28 and days_from_phm_to_sunday == 6: return datetime(_lowerCamelCase , 4 , 18 ) else: return datetime(_lowerCamelCase , 3 , 22 ) + timedelta( days=int(days_to_add + days_from_phm_to_sunday ) ) if __name__ == "__main__": for year in (1994, 2000, 2010, 2021, 2023): _lowerCAmelCase : Union[str, Any] = '''will be''' if year > datetime.now().year else '''was''' print(f'''Easter in {year} {tense} {gauss_easter(year)}''')
340
"""simple docstring""" import logging from transformers.configuration_utils import PretrainedConfig _lowerCAmelCase : Optional[Any] = logging.getLogger(__name__) class A_ ( _a ): lowerCAmelCase__ = 'masked_bert' def __init__( self: Union[str, Any] ,__lowerCAmelCase: Dict=30_522 ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Dict=12 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: List[Any]="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Tuple=512 ,__lowerCAmelCase: str=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Union[str, Any]=1e-12 ,__lowerCAmelCase: Union[str, Any]=0 ,__lowerCAmelCase: List[Any]="topK" ,__lowerCAmelCase: Optional[Any]="constant" ,__lowerCAmelCase: Optional[Any]=0.0 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = hidden_size _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : Tuple = num_attention_heads _lowerCamelCase : Optional[Any] = hidden_act _lowerCamelCase : Optional[Any] = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCamelCase : str = max_position_embeddings _lowerCamelCase : List[str] = type_vocab_size _lowerCamelCase : Optional[int] = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps _lowerCamelCase : int = pruning_method _lowerCamelCase : str = mask_init _lowerCamelCase : List[Any] = mask_scale
340
1
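A sanity check for the gauss_easter function above against widely published Western Easter dates (2000: April 23; 2023: April 9), which the arithmetic above reproduces:

from datetime import datetime

assert gauss_easter(2000) == datetime(2000, 4, 23)
assert gauss_easter(2023) == datetime(2023, 4, 9)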
"""simple docstring""" from __future__ import annotations _lowerCAmelCase : List[Any] = 10 def lowerCamelCase_( _lowerCamelCase ) -> list[int]: '''simple docstring''' _lowerCamelCase : Tuple = 1 _lowerCamelCase : int = max(_lowerCamelCase ) while placement <= max_digit: # declare and initialize empty buckets _lowerCamelCase : list[list] = [[] for _ in range(_lowerCamelCase )] # split list_of_ints between the buckets for i in list_of_ints: _lowerCamelCase : Union[str, Any] = int((i / placement) % RADIX ) buckets[tmp].append(_lowerCamelCase ) # put each buckets' contents into list_of_ints _lowerCamelCase : Optional[Any] = 0 for b in range(_lowerCamelCase ): for i in buckets[b]: _lowerCamelCase : List[Any] = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
340
"""simple docstring""" import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model _lowerCAmelCase : str = '''0.12''' # assumed parallelism: 8 if is_torch_available(): import torch def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ) -> List[Any]: '''simple docstring''' if rng is None: _lowerCamelCase : Union[str, Any] = random.Random() _lowerCamelCase : Union[str, Any] = 1 for dim in shape: total_dims *= dim _lowerCamelCase : Optional[int] = [] for _ in range(_lowerCamelCase ): values.append(rng.randint(0 , vocab_size - 1 ) ) _lowerCamelCase : Union[str, Any] = np.array(_lowerCamelCase , dtype=jnp.intaa ).reshape(_lowerCamelCase ) return output def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : Optional[int] = ids_tensor(_lowerCamelCase , vocab_size=2 , rng=_lowerCamelCase ) # make sure that at least one token is attended to for each batch _lowerCamelCase : List[str] = 1 return attn_mask @require_flax class A_ : lowerCAmelCase__ = None lowerCAmelCase__ = () def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 _lowerCamelCase : List[str] = 2 _lowerCamelCase : str = inputs["input_ids"].shape[-1] // 2 _lowerCamelCase : Tuple = inputs["input_ids"][:max_batch_size, :sequence_length] _lowerCamelCase : Any = jnp.ones_like(__lowerCAmelCase ) _lowerCamelCase : List[Any] = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens _lowerCamelCase : Optional[Any] = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` _lowerCamelCase : List[str] = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = False _lowerCamelCase : Dict = max_length _lowerCamelCase : Tuple = 0 for model_class in self.all_generative_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning _lowerCamelCase : Any = getattr(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Dict = pt_model_class(__lowerCAmelCase ).eval() _lowerCamelCase : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCAmelCase ,flax_model.params ) _lowerCamelCase : int = flax_model.generate(__lowerCAmelCase ).sequences _lowerCamelCase : Optional[int] = pt_model.generate(torch.tensor(__lowerCAmelCase ,dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: _lowerCamelCase : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, 
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = self._get_input_ids_and_config() _lowerCamelCase : Union[str, Any] = False _lowerCamelCase : Union[str, Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = True _lowerCamelCase : Optional[int] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : int = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : int = False _lowerCamelCase : Optional[Any] = max_length _lowerCamelCase : Dict = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[str] = model_class(__lowerCAmelCase ) _lowerCamelCase : Dict = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = self._get_input_ids_and_config() _lowerCamelCase : Tuple = False _lowerCamelCase : Union[str, Any] = max_length _lowerCamelCase : List[str] = 2 _lowerCamelCase : Optional[int] = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : str = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() _lowerCamelCase : int = True _lowerCamelCase : List[Any] = max_length _lowerCamelCase : Optional[Any] = 0.8 _lowerCamelCase : Union[str, Any] = 10 _lowerCamelCase : List[str] = 0.3 _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : str = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Any = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : int = jit(model.generate ) _lowerCamelCase : Optional[int] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() 
,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[str] = max_length _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : Dict = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Any = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : Any = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() _lowerCamelCase : Dict = max_length _lowerCamelCase : List[Any] = 2 _lowerCamelCase : Tuple = 1 _lowerCamelCase : List[str] = 8 _lowerCamelCase : List[Any] = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : int = model_class(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : Optional[Any] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Tuple = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : Dict = False _lowerCamelCase : Any = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[Any] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : List[str] = 
attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : int = 2 _lowerCamelCase : int = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : Dict = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) @require_flax class A_ ( unittest.TestCase ): def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" ) _lowerCamelCase : Union[str, Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) _lowerCamelCase : Optional[Any] = "Hello world" _lowerCamelCase : str = tokenizer(__lowerCAmelCase ,return_tensors="np" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__lowerCAmelCase ,"do_samples" ): model.generate(__lowerCAmelCase ,do_samples=__lowerCAmelCase ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__lowerCAmelCase ,"foo" ): _lowerCamelCase : List[str] = {"foo": "bar"} model.generate(__lowerCAmelCase ,**__lowerCAmelCase )
340
1
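The tests in the record above all follow one pattern: run `model.generate` eagerly, run the `jit`-compiled version, and require identical output sequences, including when the attention mask left-pads the input. A minimal sketch of that pattern; `model` stands for any Flax generative model and nothing here is specific to one checkpoint:

from jax import jit


def check_jit_matches_eager(model, input_ids, attention_mask=None):
    """Sketch of the eager-vs-jit equivalence check used in the tests above."""
    # Eager decoding path.
    eager = model.generate(input_ids, attention_mask=attention_mask).sequences
    # XLA-compiled decoding path; it must trace to the same token ids.
    compiled = jit(model.generate)(input_ids, attention_mask=attention_mask).sequences
    assert eager.tolist() == compiled.tolist()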
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCAmelCase : str = { '''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''], '''tokenization_convbert''': ['''ConvBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : str = ['''ConvBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Any = [ '''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ConvBertForMaskedLM''', '''ConvBertForMultipleChoice''', '''ConvBertForQuestionAnswering''', '''ConvBertForSequenceClassification''', '''ConvBertForTokenClassification''', '''ConvBertLayer''', '''ConvBertModel''', '''ConvBertPreTrainedModel''', '''load_tf_weights_in_convbert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : List[Any] = [ '''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFConvBertForMaskedLM''', '''TFConvBertForMultipleChoice''', '''TFConvBertForQuestionAnswering''', '''TFConvBertForSequenceClassification''', '''TFConvBertForTokenClassification''', '''TFConvBertLayer''', '''TFConvBertModel''', '''TFConvBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys _lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
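The ConvBERT init above is an instance of the lazy-module pattern: `_import_structure` maps submodule names to exported symbols, and the heavy imports only happen when an attribute is first touched. A generic sketch of the mechanism (this is not transformers' actual `_LazyModule`, just the idea it relies on):

import importlib
from types import ModuleType


class LazyModule(ModuleType):
    """Minimal sketch of attribute-triggered submodule imports."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        module_name = self._symbol_to_module.get(symbol)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, symbol)
        setattr(self, symbol, value)  # cache so later lookups skip __getattr__
        return value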
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : int = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class A_ ( _a ): lowerCAmelCase__ = 'mobilenet_v1' def __init__( self: Tuple ,__lowerCAmelCase: int=3 ,__lowerCAmelCase: Dict=224 ,__lowerCAmelCase: int=1.0 ,__lowerCAmelCase: Tuple=8 ,__lowerCAmelCase: List[str]="relu6" ,__lowerCAmelCase: int=True ,__lowerCAmelCase: List[Any]=0.9_99 ,__lowerCAmelCase: Optional[int]=0.02 ,__lowerCAmelCase: Optional[int]=0.0_01 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(**__lowerCAmelCase ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) _lowerCamelCase : List[str] = num_channels _lowerCamelCase : Union[str, Any] = image_size _lowerCamelCase : List[Any] = depth_multiplier _lowerCamelCase : Any = min_depth _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Dict = tf_padding _lowerCamelCase : Union[str, Any] = classifier_dropout_prob _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps class A_ ( _a ): lowerCAmelCase__ = version.parse('1.11' ) @property def _lowercase ( self: Optional[int] ): '''simple docstring''' return OrderedDict([("pixel_values", {0: "batch"})] ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def _lowercase ( self: Any ): '''simple docstring''' return 1e-4
340
1
"""simple docstring""" import flax.linen as nn import jax import jax.numpy as jnp class A_ ( nn.Module ): lowerCAmelCase__ = 42 lowerCAmelCase__ = jnp.floataa def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Optional[int] = nn.Conv( self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) def __call__( self: str ,__lowerCAmelCase: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = hidden_states.shape _lowerCamelCase : str = jax.image.resize( __lowerCAmelCase ,shape=(batch, height * 2, width * 2, channels) ,method="nearest" ,) _lowerCamelCase : Optional[int] = self.conv(__lowerCAmelCase ) return hidden_states class A_ ( nn.Module ): lowerCAmelCase__ = 42 lowerCAmelCase__ = jnp.floataa def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : int = nn.Conv( self.out_channels ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) def __call__( self: Union[str, Any] ,__lowerCAmelCase: Any ): '''simple docstring''' _lowerCamelCase : List[str] = self.conv(__lowerCAmelCase ) return hidden_states class A_ ( nn.Module ): lowerCAmelCase__ = 42 lowerCAmelCase__ = None lowerCAmelCase__ = 0.0 lowerCAmelCase__ = None lowerCAmelCase__ = jnp.floataa def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : List[Any] = self.in_channels if self.out_channels is None else self.out_channels _lowerCamelCase : str = nn.GroupNorm(num_groups=32 ,epsilon=1e-5 ) _lowerCamelCase : int = nn.Conv( __lowerCAmelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) _lowerCamelCase : List[str] = nn.Dense(__lowerCAmelCase ,dtype=self.dtype ) _lowerCamelCase : Union[str, Any] = nn.GroupNorm(num_groups=32 ,epsilon=1e-5 ) _lowerCamelCase : str = nn.Dropout(self.dropout_prob ) _lowerCamelCase : List[str] = nn.Conv( __lowerCAmelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) _lowerCamelCase : List[str] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut _lowerCamelCase : Any = None if use_nin_shortcut: _lowerCamelCase : List[str] = nn.Conv( __lowerCAmelCase ,kernel_size=(1, 1) ,strides=(1, 1) ,padding="VALID" ,dtype=self.dtype ,) def __call__( self: int ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Optional[int]=True ): '''simple docstring''' _lowerCamelCase : Optional[Any] = hidden_states _lowerCamelCase : Optional[Any] = self.norma(__lowerCAmelCase ) _lowerCamelCase : Any = nn.swish(__lowerCAmelCase ) _lowerCamelCase : List[Any] = self.conva(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = self.time_emb_proj(nn.swish(__lowerCAmelCase ) ) _lowerCamelCase : Any = jnp.expand_dims(jnp.expand_dims(__lowerCAmelCase ,1 ) ,1 ) _lowerCamelCase : Tuple = hidden_states + temb _lowerCamelCase : Union[str, Any] = self.norma(__lowerCAmelCase ) _lowerCamelCase : Dict = nn.swish(__lowerCAmelCase ) _lowerCamelCase : List[Any] = self.dropout(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : List[Any] = self.conva(__lowerCAmelCase ) if self.conv_shortcut is not None: _lowerCamelCase : Union[str, Any] = self.conv_shortcut(__lowerCAmelCase ) return hidden_states + residual
340
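The upsampling block above doubles height and width with `jax.image.resize(..., method="nearest")` before its 3x3 convolution. The resize step in isolation, checked on a dummy NHWC tensor:

import jax
import jax.numpy as jnp

# Standalone check of the nearest-neighbour upsampling used by the block above:
# NHWC input, spatial dims doubled, channel count unchanged.
hidden_states = jnp.arange(2 * 4 * 4 * 3, dtype=jnp.float32).reshape(2, 4, 4, 3)
batch, height, width, channels = hidden_states.shape
upsampled = jax.image.resize(
    hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest"
)
assert upsampled.shape == (2, 8, 8, 3)
# Nearest-neighbour resize repeats each source pixel into a 2x2 block.
assert bool(jnp.all(upsampled[0, 0, 0] == upsampled[0, 1, 1]))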
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def lowerCamelCase_( _lowerCamelCase ) -> Any: '''simple docstring''' for param in module.parameters(): _lowerCamelCase : Optional[int] = False def lowerCamelCase_( ) -> List[str]: '''simple docstring''' _lowerCamelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _lowerCamelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Dict = plt.imshow(_lowerCamelCase ) fig.axes.get_xaxis().set_visible(_lowerCamelCase ) fig.axes.get_yaxis().set_visible(_lowerCamelCase ) plt.show() def lowerCamelCase_( ) -> str: '''simple docstring''' _lowerCamelCase : Tuple = datetime.now() _lowerCamelCase : Tuple = current_time.strftime("%H:%M:%S" ) return timestamp
340
1
"""simple docstring""" _lowerCAmelCase : dict[tuple[int, int, int], int] = {} def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on _lowerCamelCase : Optional[int] = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one _lowerCamelCase : int = _calculate(days - 1 , _lowerCamelCase , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 _lowerCamelCase : Tuple = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter _lowerCamelCase : str = _calculate(days - 1 , _lowerCamelCase , 0 ) _lowerCamelCase : List[Any] = state_late + state_absent + state_ontime _lowerCamelCase : int = prizestrings return prizestrings def lowerCamelCase_( _lowerCamelCase = 30 ) -> int: '''simple docstring''' return _calculate(_lowerCamelCase , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
340
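The recursion above is easy to cross-check by hand for short strings: 3 valid strings of length 1, 8 of length 2, 19 of length 3. A bottom-up pass over the same (absent, late) state space, kept as a sketch that mirrors the counter semantics used above:

def prize_strings_dp(days: int) -> int:
    # state[(absent, late)] = number of valid prefixes ending in this state,
    # with the same bounds as the recursion above (absent < 2, late < 3).
    state = {(0, 0): 1}
    for _ in range(days):
        nxt = {}
        for (absent, late), count in state.items():
            # transitions: on time / absent / late
            for a, l in ((absent, 0), (absent + 1, 0), (absent, late + 1)):
                if a < 2 and l < 3:
                    nxt[(a, l)] = nxt.get((a, l), 0) + count
        state = nxt
    return sum(state.values())


assert [prize_strings_dp(n) for n in (1, 2, 3)] == [3, 8, 19]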
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = ( first_str_length if first_str_length > second_str_length else second_str_length ) _lowerCamelCase : list = [] for char_count in range(_lowerCamelCase ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(_lowerCamelCase ) if __name__ == "__main__": print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
340
1
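A few worked calls pin down the interleaving behaviour, including the tail of the longer string:

# One character from each string per step; the longer string's remainder
# is appended unchanged.
assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"
assert alternative_string_arrange("ABCD", "XY") == "AXBYCD"
assert alternative_string_arrange("", "XYZ") == "XYZ"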
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Optional[int] = {} _lowerCamelCase : Optional[int] = tokenizer(example["content"] , truncation=_lowerCamelCase )["input_ids"] _lowerCamelCase : Dict = len(example["content"] ) / len(output["input_ids"] ) return output _lowerCAmelCase : Tuple = HfArgumentParser(PretokenizationArguments) _lowerCAmelCase : Optional[int] = parser.parse_args() if args.num_workers is None: _lowerCAmelCase : Any = multiprocessing.cpu_count() _lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir) _lowerCAmelCase : Union[str, Any] = time.time() _lowerCAmelCase : Optional[int] = load_dataset(args.dataset_name, split='''train''') print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') _lowerCAmelCase : Any = time.time() _lowerCAmelCase : Dict = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ '''repo_name''', '''path''', '''copies''', '''size''', '''content''', '''license''', '''hash''', '''line_mean''', '''line_max''', '''alpha_frac''', '''autogenerated''', ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') _lowerCAmelCase : str = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
340
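The same `map` pattern works on a toy in-memory dataset, which makes the output columns visible without touching the Hub. The sketch below assumes the public `gpt2` tokenizer can be downloaded; any small tokenizer would do:

from datasets import Dataset
from transformers import AutoTokenizer

# Toy version of the pretokenization pass above, on two in-memory examples.
tok = AutoTokenizer.from_pretrained("gpt2")
ds = Dataset.from_dict({"content": ["def f():\n    return 1\n", "print('hi')\n"]})


def tokenize(example):
    ids = tok(example["content"], truncation=False)["input_ids"]
    return {"input_ids": ids, "ratio_char_token": len(example["content"]) / len(ids)}


ds = ds.map(tokenize, remove_columns=["content"])
print(ds[0]["ratio_char_token"])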
"""simple docstring""" _lowerCAmelCase : Tuple = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Any = [False] * len(_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = [s] _lowerCamelCase : str = True while queue: _lowerCamelCase : Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_lowerCamelCase ) _lowerCamelCase : Any = True _lowerCamelCase : Any = u return visited[t] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : List[str] = [-1] * (len(_lowerCamelCase )) _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : List[str] = [i[:] for i in graph] # Record original cut, copy. while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Any = float("Inf" ) _lowerCamelCase : Dict = sink while s != source: # Find the minimum value in select path _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , graph[parent[s]][s] ) _lowerCamelCase : Union[str, Any] = parent[s] max_flow += path_flow _lowerCamelCase : Optional[Any] = sink while v != source: _lowerCamelCase : Union[str, Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _lowerCamelCase : List[str] = parent[v] for i in range(len(_lowerCamelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
340
1
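The hard-coded network above is the classic CLRS flow example, whose minimum cut has capacity 23. Assuming `test_graph` and `mincut` from the record above are in scope, a quick check (run on a copy, since `mincut` mutates its input):

import copy

residual = copy.deepcopy(test_graph)
cut_edges = mincut(residual, source=0, sink=5)
# Sum the original capacities over the saturated edges.
cut_capacity = sum(test_graph[u][v] for u, v in cut_edges)
print(cut_edges, cut_capacity)  # expected: [(1, 3), (4, 3), (4, 5)] and 23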
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class A_ : lowerCAmelCase__ = XGLMConfig lowerCAmelCase__ = {} lowerCAmelCase__ = 'gelu' def __init__( self: int ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[Any]=14 ,__lowerCAmelCase: Any=7 ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: Optional[int]=True ,__lowerCAmelCase: Optional[Any]=99 ,__lowerCAmelCase: int=32 ,__lowerCAmelCase: str=2 ,__lowerCAmelCase: str=4 ,__lowerCAmelCase: Optional[int]=37 ,__lowerCAmelCase: str="gelu" ,__lowerCAmelCase: Tuple=0.1 ,__lowerCAmelCase: List[Any]=0.1 ,__lowerCAmelCase: str=512 ,__lowerCAmelCase: List[str]=0.02 ,): '''simple docstring''' _lowerCamelCase : Dict = parent _lowerCamelCase : Tuple = batch_size _lowerCamelCase : Optional[int] = seq_length _lowerCamelCase : Optional[int] = is_training _lowerCamelCase : Any = use_input_mask _lowerCamelCase : Tuple = use_labels _lowerCamelCase : str = vocab_size _lowerCamelCase : int = d_model _lowerCamelCase : Optional[int] = num_hidden_layers _lowerCamelCase : List[str] = num_attention_heads _lowerCamelCase : Dict = ffn_dim _lowerCamelCase : List[str] = activation_function _lowerCamelCase : Dict = activation_dropout _lowerCamelCase : Optional[int] = attention_dropout _lowerCamelCase : Union[str, Any] = max_position_embeddings _lowerCamelCase : List[str] = initializer_range _lowerCamelCase : Any = None _lowerCamelCase : Tuple = 0 _lowerCamelCase : Union[str, Any] = 2 _lowerCamelCase : Optional[Any] = 1 def _lowercase ( self: List[Any] ): '''simple docstring''' return XGLMConfig.from_pretrained("facebook/xglm-564M" ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Optional[Any] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) ,clip_value_min=0 ,clip_value_max=3 ) _lowerCamelCase : Union[str, Any] = None if self.use_input_mask: _lowerCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCamelCase : Dict = self.get_config() _lowerCamelCase : List[Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowercase ( self: Optional[int] ): '''simple docstring''' return XGLMConfig( vocab_size=self.vocab_size ,d_model=self.hidden_size ,num_layers=self.num_hidden_layers ,attention_heads=self.num_attention_heads ,ffn_dim=self.ffn_dim ,activation_function=self.activation_function ,activation_dropout=self.activation_dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,use_cache=__lowerCAmelCase ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,return_dict=__lowerCAmelCase ,) def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : Tuple = self.prepare_config_and_inputs() ( ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( 
_lowerCamelCase ), ) : Union[str, Any] = config_and_inputs _lowerCamelCase : Optional[int] = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_tf class A_ ( _a , _a , unittest.TestCase ): lowerCAmelCase__ = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowerCAmelCase__ = (TFXGLMForCausalLM,) if is_tf_available() else () lowerCAmelCase__ = ( {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : Dict = TFXGLMModelTester(self ) _lowerCamelCase : List[str] = ConfigTester(self ,config_class=__lowerCAmelCase ,n_embd=37 ) def _lowercase ( self: List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() @slow def _lowercase ( self: Optional[int] ): '''simple docstring''' for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Tuple = TFXGLMModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." ) def _lowercase ( self: Dict ): '''simple docstring''' super().test_resize_token_embeddings() @require_tf class A_ ( unittest.TestCase ): @slow def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict=True ): '''simple docstring''' _lowerCamelCase : List[Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) _lowerCamelCase : int = tf.convert_to_tensor([[2, 268, 9_865]] ,dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _lowerCamelCase : Dict = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581] # fmt: on _lowerCamelCase : int = model.generate(__lowerCAmelCase ,do_sample=__lowerCAmelCase ,num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() ,__lowerCAmelCase ) @slow def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : Tuple = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) _lowerCamelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) tf.random.set_seed(0 ) _lowerCamelCase : Dict = tokenizer("Today is a nice day and" ,return_tensors="tf" ) _lowerCamelCase : Dict = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(":/CPU:0" ): _lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ,do_sample=__lowerCAmelCase ,seed=[7, 0] ) _lowerCamelCase : int = tokenizer.decode(output_ids[0] ,skip_special_tokens=__lowerCAmelCase ) _lowerCamelCase : List[str] = ( "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due" ) self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase ) @slow def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) _lowerCamelCase : Optional[int] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) _lowerCamelCase : Optional[int] = "left" # use different length sentences to test batching _lowerCamelCase : Optional[Any] = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. 
The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When", "Hello, my dog is a little", ] _lowerCamelCase : Tuple = tokenizer(__lowerCAmelCase ,return_tensors="tf" ,padding=__lowerCAmelCase ) _lowerCamelCase : int = inputs["input_ids"] _lowerCamelCase : List[Any] = model.generate(input_ids=__lowerCAmelCase ,attention_mask=inputs["attention_mask"] ,max_new_tokens=12 ) _lowerCamelCase : Optional[int] = tokenizer(sentences[0] ,return_tensors="tf" ).input_ids _lowerCamelCase : Dict = model.generate(input_ids=__lowerCAmelCase ,max_new_tokens=12 ) _lowerCamelCase : Optional[Any] = tokenizer(sentences[1] ,return_tensors="tf" ).input_ids _lowerCamelCase : Optional[int] = model.generate(input_ids=__lowerCAmelCase ,max_new_tokens=12 ) _lowerCamelCase : Dict = tokenizer.batch_decode(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=__lowerCAmelCase ) _lowerCamelCase : str = tokenizer.decode(output_padded[0] ,skip_special_tokens=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase ,[non_padded_sentence, padded_sentence] )
340
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[str] = { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''', '''umberto-commoncrawl-cased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json''' ), '''umberto-wikipedia-uncased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json''' ), } class A_ ( _a ): lowerCAmelCase__ = 'camembert' def __init__( self: Tuple ,__lowerCAmelCase: Union[str, Any]=30_522 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Union[str, Any]=12 ,__lowerCAmelCase: int=12 ,__lowerCAmelCase: Optional[int]=3_072 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: int=512 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Any=1 ,__lowerCAmelCase: Any=0 ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Any="absolute" ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Tuple=None ,**__lowerCAmelCase: Dict ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[str] = vocab_size _lowerCamelCase : Any = hidden_size _lowerCamelCase : Union[str, Any] = num_hidden_layers _lowerCamelCase : str = num_attention_heads _lowerCamelCase : List[Any] = hidden_act _lowerCamelCase : int = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = max_position_embeddings _lowerCamelCase : str = type_vocab_size _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Union[str, Any] = layer_norm_eps _lowerCamelCase : Tuple = position_embedding_type _lowerCamelCase : List[Any] = use_cache _lowerCamelCase : Dict = classifier_dropout class A_ ( _a ): @property def _lowercase ( self: Any ): '''simple docstring''' if self.task == "multiple-choice": _lowerCamelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"} else: _lowerCamelCase : int = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
340
1
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) class A_ ( _a ): lowerCAmelCase__ = ['input_values', 'padding_mask'] def __init__( self: Tuple ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: int = 24_000 ,__lowerCAmelCase: float = 0.0 ,__lowerCAmelCase: float = None ,__lowerCAmelCase: float = None ,**__lowerCAmelCase: Union[str, Any] ,): '''simple docstring''' super().__init__(feature_size=__lowerCAmelCase ,sampling_rate=__lowerCAmelCase ,padding_value=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : Any = chunk_length_s _lowerCamelCase : int = overlap @property def _lowercase ( self: Dict ): '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _lowercase ( self: Tuple ): '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self: Tuple ,__lowerCAmelCase: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,__lowerCAmelCase: Optional[Union[bool, str, PaddingStrategy]] = None ,__lowerCAmelCase: Optional[bool] = False ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Optional[Union[str, TensorType]] = None ,__lowerCAmelCase: Optional[int] = None ,): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with""" F""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if padding and truncation: raise ValueError("Both padding and truncation were set. Make sure you only set one." 
) elif padding is None: # by default let's pad the inputs _lowerCamelCase : Optional[Any] = True _lowerCamelCase : Union[str, Any] = bool( isinstance(__lowerCAmelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: _lowerCamelCase : List[str] = [np.asarray(__lowerCAmelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__lowerCAmelCase ,np.ndarray ): _lowerCamelCase : Dict = np.asarray(__lowerCAmelCase ,dtype=np.floataa ) elif isinstance(__lowerCAmelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): _lowerCamelCase : Tuple = raw_audio.astype(np.floataa ) # always return batch if not is_batched: _lowerCamelCase : Optional[int] = [np.asarray(__lowerCAmelCase ).T] # verify inputs are valid for idx, example in enumerate(__lowerCAmelCase ): if example.ndim > 2: raise ValueError(F"""Expected input shape (channels, length) but got shape {example.shape}""" ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F"""Expected mono audio but example has {example.shape[-1]} channels""" ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F"""Expected stereo audio but example has {example.shape[-1]} channels""" ) _lowerCamelCase : Tuple = None _lowerCamelCase : int = BatchFeature({"input_values": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: _lowerCamelCase : Tuple = min(array.shape[0] for array in raw_audio ) _lowerCamelCase : Optional[int] = int(np.floor(max_length / self.chunk_stride ) ) _lowerCamelCase : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: _lowerCamelCase : Any = max(array.shape[0] for array in raw_audio ) _lowerCamelCase : Dict = int(np.ceil(max_length / self.chunk_stride ) ) _lowerCamelCase : Optional[int] = (nb_step - 1) * self.chunk_stride + self.chunk_length _lowerCamelCase : int = "max_length" else: _lowerCamelCase : Dict = input_values # normal padding on batch if padded_inputs is None: _lowerCamelCase : Any = self.pad( __lowerCAmelCase ,max_length=__lowerCAmelCase ,truncation=__lowerCAmelCase ,padding=__lowerCAmelCase ,return_attention_mask=__lowerCAmelCase ,) if padding: _lowerCamelCase : Tuple = padded_inputs.pop("attention_mask" ) _lowerCamelCase : str = [] for example in padded_inputs.pop("input_values" ): if self.feature_size == 1: _lowerCamelCase : int = example[..., None] input_values.append(example.T ) _lowerCamelCase : int = input_values if return_tensors is not None: _lowerCamelCase : Optional[Any] = padded_inputs.convert_to_tensors(__lowerCAmelCase ) return padded_inputs
340
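The chunking arithmetic in the extractor above can be reproduced with plain numbers: for a 1 s chunk at 24 kHz and 1% overlap, the stride is 23760 samples, and padding rounds an input up to a whole number of chunks:

import math

# Mirrors the chunk/stride arithmetic of the feature extractor above for
# chunk_length_s = 1.0, sampling_rate = 24000, overlap = 0.01.
sampling_rate, chunk_length_s, overlap = 24_000, 1.0, 0.01
chunk_length = int(chunk_length_s * sampling_rate)          # 24000
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 23760
# Padding a 50_000-sample input up to a whole number of chunks:
max_length = 50_000
nb_step = int(math.ceil(max_length / chunk_stride))         # 3
padded = (nb_step - 1) * chunk_stride + chunk_length        # 71520
print(chunk_length, chunk_stride, nb_step, padded)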
"""simple docstring""" from collections import defaultdict def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : str = True for v in tree[start]: if v not in visited: ret += dfs(_lowerCamelCase ) if ret % 2 == 0: cuts.append(_lowerCamelCase ) return ret def lowerCamelCase_( ) -> int: '''simple docstring''' dfs(1 ) if __name__ == "__main__": _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = 10, 9 _lowerCAmelCase : str = defaultdict(list) _lowerCAmelCase : dict[int, bool] = {} _lowerCAmelCase : list[int] = [] _lowerCAmelCase : Any = 0 _lowerCAmelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
340
1
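For the hard-coded tree, rooting at node 1 gives even subtree sizes 2 at node 3 and 4 at node 6, so removing the edges (1, 3) and (1, 6) is the optimal answer of 2; `cuts` also records the root itself (subtree size 10), hence the `- 1`. Assuming the `__main__` setup above has run, the result can be asserted directly:

# Worked check: only nodes 1 (the whole tree), 3 and 6 root even subtrees.
even_tree()
assert sorted(cuts) == [1, 3, 6]
assert len(cuts) - 1 == 2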
"""simple docstring""" import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=7 ) -> Any: '''simple docstring''' _lowerCamelCase : Optional[Any] = None if token is not None: _lowerCamelCase : Optional[int] = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""} # The id of a workflow (not of a workflow run) _lowerCamelCase : List[Any] = "636036" _lowerCamelCase : Tuple = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs""" # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}""" _lowerCamelCase : Optional[int] = requests.get(_lowerCamelCase , headers=_lowerCamelCase ).json() return result["workflow_runs"] def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Tuple = get_daily_ci_runs(_lowerCamelCase ) _lowerCamelCase : Any = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": _lowerCamelCase : Optional[int] = workflow_run["id"] break return workflow_run_id def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Tuple = get_last_daily_ci_runs(_lowerCamelCase ) if workflow_run_id is not None: _lowerCamelCase : Union[str, Any] = get_artifacts_links(worflow_run_id=_lowerCamelCase , token=_lowerCamelCase ) for artifact_name in artifact_names: if artifact_name in artifacts_links: _lowerCamelCase : Any = artifacts_links[artifact_name] download_artifact( artifact_name=_lowerCamelCase , artifact_url=_lowerCamelCase , output_dir=_lowerCamelCase , token=_lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' get_last_daily_ci_artifacts(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Optional[Any] = {} for artifact_name in artifact_names: _lowerCamelCase : List[str] = os.path.join(_lowerCamelCase , F"""{artifact_name}.zip""" ) if os.path.isfile(_lowerCamelCase ): _lowerCamelCase : Tuple = {} with zipfile.ZipFile(_lowerCamelCase ) as z: for filename in z.namelist(): if not os.path.isdir(_lowerCamelCase ): # read the file with z.open(_lowerCamelCase ) as f: _lowerCamelCase : int = f.read().decode("UTF-8" ) return results
340
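The runs query above is a single GitHub REST call; a standalone hedged version follows (the token is a placeholder, and 636036 is the workflow id hard-coded in the script):

import requests

token = "GITHUB_TOKEN"  # placeholder; needs repo read scope
headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
url = (
    "https://api.github.com/repos/huggingface/transformers/actions/workflows/636036/runs"
    "?branch=main&event=schedule&exclude_pull_requests=true&per_page=7"
)
runs = requests.get(url, headers=headers).json().get("workflow_runs", [])
# Pick the most recent completed scheduled run, as the script above does.
completed = next((run["id"] for run in runs if run["status"] == "completed"), None)
print(completed)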
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder _lowerCAmelCase : Optional[int] = '''__DUMMY_TRANSFORMERS_USER__''' _lowerCAmelCase : Dict = '''Dummy User''' _lowerCAmelCase : Optional[int] = '''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''' _lowerCAmelCase : Tuple = '''https://hub-ci.huggingface.co''' _lowerCAmelCase : Any = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}''' _lowerCAmelCase : Tuple = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}''' _lowerCAmelCase : Dict = Path('''~/.huggingface/hub_ci_token''').expanduser() @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' monkeypatch.setattr("datasets.config.HF_ENDPOINT" , _lowerCamelCase ) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' HfFolder.save_token(_lowerCamelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="session" ) def lowerCamelCase_( ) -> str: '''simple docstring''' return HfApi(endpoint=_lowerCamelCase ) @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Dict = HfFolder.get_token() HfFolder.save_token(_lowerCamelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' def _cleanup_repo(_lowerCamelCase ): hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) return _cleanup_repo @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> List[str]: '''simple docstring''' @contextmanager def _temporary_repo(_lowerCamelCase ): try: yield repo_id finally: cleanup_repo(_lowerCamelCase ) return _temporary_repo @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Tuple = F"""repo_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[str] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data/text_data.txt" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : Dict = 
F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Any = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[Any] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
340
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : List[Any] = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: _lowerCamelCase : Dict = [144, 192, 240] _lowerCamelCase : Optional[int] = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: _lowerCamelCase : Optional[int] = [96, 120, 144] _lowerCamelCase : Dict = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: _lowerCamelCase : Any = [64, 80, 96] _lowerCamelCase : List[str] = [16, 16, 24, 48, 64, 80, 320] _lowerCamelCase : Union[str, Any] = 0.0_5 _lowerCamelCase : Optional[Any] = 2.0 if mobilevit_name.startswith("deeplabv3_" ): _lowerCamelCase : List[str] = 512 _lowerCamelCase : str = 16 _lowerCamelCase : Tuple = 21 _lowerCamelCase : Union[str, Any] = "pascal-voc-id2label.json" else: _lowerCamelCase : Any = 1000 _lowerCamelCase : Optional[int] = "imagenet-1k-id2label.json" _lowerCamelCase : List[Any] = "huggingface/label-files" _lowerCamelCase : Tuple = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _lowerCamelCase : Any = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase : List[str] = idalabel _lowerCamelCase : int = {v: k for k, v in idalabel.items()} return config def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> str: '''simple docstring''' for i in range(1 , 6 ): if F"""layer_{i}.""" in name: _lowerCamelCase : Optional[int] = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" ) if "conv_1." in name: _lowerCamelCase : str = name.replace("conv_1." , "conv_stem." ) if ".block." in name: _lowerCamelCase : str = name.replace(".block." , "." ) if "exp_1x1" in name: _lowerCamelCase : Tuple = name.replace("exp_1x1" , "expand_1x1" ) if "red_1x1" in name: _lowerCamelCase : List[Any] = name.replace("red_1x1" , "reduce_1x1" ) if ".local_rep.conv_3x3." in name: _lowerCamelCase : Union[str, Any] = name.replace(".local_rep.conv_3x3." , ".conv_kxk." ) if ".local_rep.conv_1x1." in name: _lowerCamelCase : Dict = name.replace(".local_rep.conv_1x1." , ".conv_1x1." ) if ".norm." in name: _lowerCamelCase : Union[str, Any] = name.replace(".norm." , ".normalization." ) if ".conv." in name: _lowerCamelCase : Optional[int] = name.replace(".conv." , ".convolution." ) if ".conv_proj." in name: _lowerCamelCase : Union[str, Any] = name.replace(".conv_proj." , ".conv_projection." 
) for i in range(0 , 2 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: _lowerCamelCase : List[str] = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: _lowerCamelCase : Optional[int] = name.replace(F""".{i}.{j}.""" , F""".{i}.""" ) if "expand_1x1" in name: _lowerCamelCase : Union[str, Any] = name.replace("expand_1x1" , "downsampling_layer.expand_1x1" ) if "conv_3x3" in name: _lowerCamelCase : Any = name.replace("conv_3x3" , "downsampling_layer.conv_3x3" ) if "reduce_1x1" in name: _lowerCamelCase : List[str] = name.replace("reduce_1x1" , "downsampling_layer.reduce_1x1" ) for i in range(2 , 5 ): if F""".global_rep.{i}.weight""" in name: _lowerCamelCase : Tuple = name.replace(F""".global_rep.{i}.weight""" , ".layernorm.weight" ) if F""".global_rep.{i}.bias""" in name: _lowerCamelCase : str = name.replace(F""".global_rep.{i}.bias""" , ".layernorm.bias" ) if ".global_rep." in name: _lowerCamelCase : List[Any] = name.replace(".global_rep." , ".transformer." ) if ".pre_norm_mha.0." in name: _lowerCamelCase : Tuple = name.replace(".pre_norm_mha.0." , ".layernorm_before." ) if ".pre_norm_mha.1.out_proj." in name: _lowerCamelCase : Tuple = name.replace(".pre_norm_mha.1.out_proj." , ".attention.output.dense." ) if ".pre_norm_ffn.0." in name: _lowerCamelCase : Any = name.replace(".pre_norm_ffn.0." , ".layernorm_after." ) if ".pre_norm_ffn.1." in name: _lowerCamelCase : Dict = name.replace(".pre_norm_ffn.1." , ".intermediate.dense." ) if ".pre_norm_ffn.4." in name: _lowerCamelCase : List[str] = name.replace(".pre_norm_ffn.4." , ".output.dense." ) if ".transformer." in name: _lowerCamelCase : Dict = name.replace(".transformer." , ".transformer.layer." ) if ".aspp_layer." in name: _lowerCamelCase : Dict = name.replace(".aspp_layer." , "." ) if ".aspp_pool." in name: _lowerCamelCase : int = name.replace(".aspp_pool." , "." ) if "seg_head." in name: _lowerCamelCase : List[Any] = name.replace("seg_head." , "segmentation_head." ) if "segmentation_head.classifier.classifier." in name: _lowerCamelCase : List[str] = name.replace("segmentation_head.classifier.classifier." , "segmentation_head.classifier." ) if "classifier.fc." in name: _lowerCamelCase : Dict = name.replace("classifier.fc." , "classifier." ) elif (not base_model) and ("segmentation_head." not in name): _lowerCamelCase : Any = "mobilevit." + name return name def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> int: '''simple docstring''' if base_model: _lowerCamelCase : List[str] = "" else: _lowerCamelCase : Tuple = "mobilevit." for key in orig_state_dict.copy().keys(): _lowerCamelCase : Tuple = orig_state_dict.pop(_lowerCamelCase ) if key[:8] == "encoder.": _lowerCamelCase : str = key[8:] if "qkv" in key: _lowerCamelCase : Dict = key.split("." 
) _lowerCamelCase : List[str] = int(key_split[0][6:] ) - 1 _lowerCamelCase : Optional[Any] = int(key_split[3] ) _lowerCamelCase : List[str] = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" ) _lowerCamelCase : int = layer.transformer.layer[transformer_num].attention.attention.all_head_size _lowerCamelCase : List[Any] = ( F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.""" ) if "weight" in key: _lowerCamelCase : int = val[:dim, :] _lowerCamelCase : Any = val[dim : dim * 2, :] _lowerCamelCase : Optional[Any] = val[-dim:, :] else: _lowerCamelCase : Tuple = val[:dim] _lowerCamelCase : Dict = val[dim : dim * 2] _lowerCamelCase : int = val[-dim:] else: _lowerCamelCase : Any = val return orig_state_dict def lowerCamelCase_( ) -> Tuple: '''simple docstring''' _lowerCamelCase : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCamelCase : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return im @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> str: '''simple docstring''' _lowerCamelCase : List[Any] = get_mobilevit_config(_lowerCamelCase ) # load original state_dict _lowerCamelCase : List[Any] = torch.load(_lowerCamelCase , map_location="cpu" ) # load 🤗 model if mobilevit_name.startswith("deeplabv3_" ): _lowerCamelCase : Tuple = MobileViTForSemanticSegmentation(_lowerCamelCase ).eval() else: _lowerCamelCase : List[Any] = MobileViTForImageClassification(_lowerCamelCase ).eval() _lowerCamelCase : int = convert_state_dict(_lowerCamelCase , _lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor _lowerCamelCase : Any = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) _lowerCamelCase : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" ) _lowerCamelCase : Union[str, Any] = model(**_lowerCamelCase ) _lowerCamelCase : int = outputs.logits if mobilevit_name.startswith("deeplabv3_" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": _lowerCamelCase : List[Any] = torch.tensor( [ [[6.2_0_6_5, 6.1_2_9_2, 6.2_0_7_0], [6.1_0_7_9, 6.1_2_5_4, 6.1_7_4_7], [6.0_0_4_2, 6.1_0_7_1, 6.1_0_3_4]], [[-6.9_2_5_3, -6.8_6_5_3, -7.0_3_9_8], [-7.3_2_1_8, -7.3_9_8_3, -7.3_6_7_0], [-7.1_9_6_1, -7.2_4_8_2, -7.1_5_6_9]], [[-4.4_7_2_3, -4.4_3_4_8, -4.3_7_6_9], [-5.3_6_2_9, -5.4_6_3_2, -5.4_5_9_8], [-5.1_5_8_7, -5.3_4_0_2, -5.5_0_5_9]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": _lowerCamelCase : Union[str, Any] = torch.tensor( [ [[5.4_4_4_9, 5.5_7_3_3, 5.6_3_1_4], [5.1_8_1_5, 5.3_9_3_0, 5.5_9_6_3], [5.1_6_5_6, 5.4_3_3_3, 5.4_8_5_3]], [[-9.4_4_2_3, -9.7_7_6_6, -9.6_7_1_4], [-9.1_5_8_1, -9.5_7_2_0, -9.5_5_1_9], [-9.1_0_0_6, -9.6_4_5_8, -9.5_7_0_3]], [[-7.7_7_2_1, -7.3_7_1_6, -7.1_5_8_3], [-8.4_5_9_9, -8.0_6_2_4, -7.7_9_4_4], [-8.4_1_7_2, -7.8_3_6_6, -7.5_0_2_5]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": _lowerCamelCase : List[Any] = torch.tensor( [ [[6.9_8_1_1, 6.9_7_4_3, 7.3_1_2_3], [7.1_7_7_7, 7.1_9_3_1, 7.3_9_3_8], [7.5_6_3_3, 7.8_0_5_0, 7.8_9_0_1]], [[-1_0.5_5_3_6, -1_0.2_3_3_2, -1_0.2_9_2_4], [-1_0.2_3_3_6, -9.8_6_2_4, -9.5_9_6_4], [-1_0.8_8_4_0, -1_0.8_1_5_8, -1_0.6_6_5_9]], [[-3.4_9_3_8, -3.0_6_3_1, -2.8_6_2_0], [-3.4_2_0_5, -2.8_1_3_5, -2.6_8_7_5], [-3.4_1_7_9, -2.7_9_4_5, -2.8_7_5_0]], ] ) else: raise ValueError(F"""Unknown mobilevit_name: 
{mobilevit_name}""" ) assert torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-4 ) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": _lowerCamelCase : List[Any] = torch.tensor([-0.9_8_6_6, 0.2_3_9_2, -1.1_2_4_1] ) elif mobilevit_name == "mobilevit_xs": _lowerCamelCase : Union[str, Any] = torch.tensor([-2.4_7_6_1, -0.9_3_9_9, -1.9_5_8_7] ) elif mobilevit_name == "mobilevit_xxs": _lowerCamelCase : Optional[int] = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1e-4 ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowerCamelCase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_lowerCamelCase ) if push_to_hub: _lowerCamelCase : List[Any] = { "mobilevit_s": "mobilevit-small", "mobilevit_xs": "mobilevit-x-small", "mobilevit_xxs": "mobilevit-xx-small", "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small", "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small", "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small", } print("Pushing to the hub..." ) _lowerCamelCase : Union[str, Any] = model_mapping[mobilevit_name] image_processor.push_to_hub(_lowerCamelCase , organization="apple" ) model.push_to_hub(_lowerCamelCase , organization="apple" ) if __name__ == "__main__": _lowerCAmelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--mobilevit_name''', default='''mobilevit_s''', type=str, help=( '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',''' ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.''' ), ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _lowerCAmelCase : Dict = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
340
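The attention conversion above slices each fused `qkv` projection by rows into query, key and value blocks of `dim` rows each. The slicing in isolation, on a toy tensor:

import torch

# Standalone sketch of the fused-QKV split performed above: the first `dim`
# rows are the query projection, the next `dim` the key, the last `dim` the value.
dim = 4  # stands in for the layer's all_head_size
qkv_weight = torch.randn(3 * dim, dim)
q_w = qkv_weight[:dim, :]
k_w = qkv_weight[dim : dim * 2, :]
v_w = qkv_weight[-dim:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), qkv_weight)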
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) class A_ ( _a ): def __init__( self: List[Any] ,__lowerCAmelCase: Union[List[ControlNetModel], Tuple[ControlNetModel]] ): '''simple docstring''' super().__init__() _lowerCamelCase : Tuple = nn.ModuleList(__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Union[torch.Tensor, float, int] ,__lowerCAmelCase: torch.Tensor ,__lowerCAmelCase: List[torch.tensor] ,__lowerCAmelCase: List[float] ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[Dict[str, Any]] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(__lowerCAmelCase ,__lowerCAmelCase ,self.nets ) ): _lowerCamelCase, _lowerCamelCase : Union[str, Any] = controlnet( __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,) # merge samples if i == 0: _lowerCamelCase, _lowerCamelCase : Optional[Any] = down_samples, mid_sample else: _lowerCamelCase : Optional[int] = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__lowerCAmelCase ,__lowerCAmelCase ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Union[str, os.PathLike] ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Callable = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[str] = None ,): '''simple docstring''' _lowerCamelCase : List[Any] = 0 _lowerCamelCase : str = save_directory for controlnet in self.nets: controlnet.save_pretrained( __lowerCAmelCase ,is_main_process=__lowerCAmelCase ,save_function=__lowerCAmelCase ,safe_serialization=__lowerCAmelCase ,variant=__lowerCAmelCase ,) idx += 1 _lowerCamelCase : int = model_path_to_save + F"""_{idx}""" @classmethod def _lowercase ( cls: Any ,__lowerCAmelCase: Optional[Union[str, os.PathLike]] ,**__lowerCAmelCase: int ): '''simple docstring''' _lowerCamelCase : int = 0 _lowerCamelCase : str = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... _lowerCamelCase : Dict = pretrained_model_path while os.path.isdir(__lowerCAmelCase ): _lowerCamelCase : List[Any] = ControlNetModel.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase ) controlnets.append(__lowerCAmelCase ) idx += 1 _lowerCamelCase : Tuple = pretrained_model_path + F"""_{idx}""" logger.info(F"""{len(__lowerCAmelCase )} controlnets loaded from {pretrained_model_path}.""" ) if len(__lowerCAmelCase ) == 0: raise ValueError( F"""No ControlNets found under {os.path.dirname(__lowerCAmelCase )}. Expected at least {pretrained_model_path + '_0'}.""" ) return cls(__lowerCAmelCase )
340
1
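In the forward pass above, each ControlNet is called with its own conditioning image and scale, and the per-net residual lists are then merged by elementwise addition. The merge step reduces to:

import torch

# Element-wise merge used above, shown on dummy residual lists from two nets.
down_a = [torch.ones(1, 4, 8, 8), torch.ones(1, 4, 4, 4)]
down_b = [torch.full((1, 4, 8, 8), 2.0), torch.full((1, 4, 4, 4), 2.0)]
mid_a, mid_b = torch.ones(1, 4, 2, 2), torch.full((1, 4, 2, 2), 2.0)

down = [prev + curr for prev, curr in zip(down_a, down_b)]  # samples_prev + samples_curr
mid = mid_a + mid_b
assert float(down[0][0, 0, 0, 0]) == 3.0 and float(mid[0, 0, 0, 0]) == 3.0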
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : str = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class A_ ( _a ): lowerCAmelCase__ = 'megatron-bert' def __init__( self: Any ,__lowerCAmelCase: List[Any]=29_056 ,__lowerCAmelCase: Optional[Any]=1_024 ,__lowerCAmelCase: int=24 ,__lowerCAmelCase: Union[str, Any]=16 ,__lowerCAmelCase: Optional[int]=4_096 ,__lowerCAmelCase: str="gelu" ,__lowerCAmelCase: int=0.1 ,__lowerCAmelCase: Tuple=0.1 ,__lowerCAmelCase: List[Any]=512 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: str=0.02 ,__lowerCAmelCase: List[str]=1e-12 ,__lowerCAmelCase: List[str]=0 ,__lowerCAmelCase: str="absolute" ,__lowerCAmelCase: Union[str, Any]=True ,**__lowerCAmelCase: Dict ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : Dict = vocab_size _lowerCamelCase : Union[str, Any] = hidden_size _lowerCamelCase : Any = num_hidden_layers _lowerCamelCase : Union[str, Any] = num_attention_heads _lowerCamelCase : List[Any] = hidden_act _lowerCamelCase : int = intermediate_size _lowerCamelCase : Optional[int] = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCamelCase : Any = max_position_embeddings _lowerCamelCase : List[Any] = type_vocab_size _lowerCamelCase : Dict = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps _lowerCamelCase : int = position_embedding_type _lowerCamelCase : Tuple = use_cache
340
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : int = logging.get_logger(__name__) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]: '''simple docstring''' _lowerCamelCase : Tuple = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("head" ): _lowerCamelCase : Tuple = "segformer.encoder." + key if key.startswith("backbone" ): _lowerCamelCase : Any = key.replace("backbone" , "segformer.encoder" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _lowerCamelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )] _lowerCamelCase : int = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_lowerCamelCase )-1}""" ) if "norm" in key: _lowerCamelCase : Optional[Any] = key.replace("norm" , "layer_norm" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _lowerCamelCase : Dict = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )] _lowerCamelCase : Tuple = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_lowerCamelCase )-1}""" ) if "layer_norm1" in key: _lowerCamelCase : Union[str, Any] = key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: _lowerCamelCase : int = key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 _lowerCamelCase : Union[str, Any] = key[key.find("block" ) + len("block" )] _lowerCamelCase : Optional[Any] = key.replace(F"""block{idx}""" , F"""block.{int(_lowerCamelCase )-1}""" ) if "attn.q" in key: _lowerCamelCase : Optional[int] = key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: _lowerCamelCase : List[str] = key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: _lowerCamelCase : Tuple = key.replace("attn" , "attention.self" ) if "fc1" in key: _lowerCamelCase : Optional[Any] = key.replace("fc1" , "dense1" ) if "fc2" in key: _lowerCamelCase : Dict = key.replace("fc2" , "dense2" ) if "linear_pred" in key: _lowerCamelCase : int = key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: _lowerCamelCase : str = key.replace("linear_fuse.conv" , "linear_fuse" ) _lowerCamelCase : Optional[Any] = key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _lowerCamelCase : Union[str, Any] = key[key.find("linear_c" ) + len("linear_c" )] _lowerCamelCase : Optional[int] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_lowerCamelCase )-1}""" ) if key.startswith("head" ): _lowerCamelCase : List[str] = key.replace("head" , "classifier" ) _lowerCamelCase : Union[str, Any] = value return new_state_dict def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" ) _lowerCamelCase : Optional[Any] = 
state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict _lowerCamelCase : int = kv_weight[ : config.hidden_sizes[i], : ] _lowerCamelCase : int = kv_bias[: config.hidden_sizes[i]] _lowerCamelCase : Optional[int] = kv_weight[ config.hidden_sizes[i] :, : ] _lowerCamelCase : Optional[Any] = kv_bias[ config.hidden_sizes[i] : ] def lowerCamelCase_( ) -> Dict: '''simple docstring''' _lowerCamelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCamelCase : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return image @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : Any = SegformerConfig() _lowerCamelCase : int = False # set attributes based on model_name _lowerCamelCase : Any = "huggingface/label-files" if "segformer" in model_name: _lowerCamelCase : str = model_name[len("segformer." ) : len("segformer." ) + 2] if "ade" in model_name: _lowerCamelCase : str = 150 _lowerCamelCase : Dict = "ade20k-id2label.json" _lowerCamelCase : Dict = (1, 150, 128, 128) elif "city" in model_name: _lowerCamelCase : List[str] = 19 _lowerCamelCase : Tuple = "cityscapes-id2label.json" _lowerCamelCase : Tuple = (1, 19, 128, 128) else: raise ValueError(F"""Model {model_name} not supported""" ) elif "mit" in model_name: _lowerCamelCase : List[str] = True _lowerCamelCase : Tuple = model_name[4:6] _lowerCamelCase : Tuple = 1000 _lowerCamelCase : List[Any] = "imagenet-1k-id2label.json" _lowerCamelCase : List[Any] = (1, 1000) else: raise ValueError(F"""Model {model_name} not supported""" ) # set config attributes _lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _lowerCamelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase : Optional[Any] = idalabel _lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : int = 256 elif size == "b2": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : List[Any] = 768 _lowerCamelCase : Any = [3, 4, 6, 3] elif size == "b3": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : Union[str, Any] = 768 _lowerCamelCase : Optional[Any] = [3, 4, 18, 3] elif size == "b4": _lowerCamelCase : str = [64, 128, 320, 512] _lowerCamelCase : Optional[Any] = 768 _lowerCamelCase : Dict = [3, 8, 27, 3] elif size == "b5": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : Tuple = 768 _lowerCamelCase : Tuple = [3, 6, 40, 3] else: raise ValueError(F"""Size {size} not supported""" ) # load image processor (only resize + normalize) _lowerCamelCase : Dict = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase ) # prepare image _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Dict = image_processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values logger.info(F"""Converting model {model_name}...""" ) # load original state dict if encoder_only: _lowerCamelCase : Tuple = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) ) else: _lowerCamelCase : int = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )["state_dict"] # rename keys _lowerCamelCase : str = rename_keys(_lowerCamelCase , 
encoder_only=_lowerCamelCase ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(_lowerCamelCase , _lowerCamelCase ) # create HuggingFace model and load state dict if encoder_only: _lowerCamelCase : Tuple = False _lowerCamelCase : Optional[int] = SegformerForImageClassification(_lowerCamelCase ) else: _lowerCamelCase : List[str] = SegformerForSemanticSegmentation(_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) model.eval() # forward pass _lowerCamelCase : Any = model(_lowerCamelCase ) _lowerCamelCase : Dict = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": _lowerCamelCase : str = torch.tensor( [ [[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]], [[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]], [[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]], [[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]], [[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": _lowerCamelCase : int = torch.tensor( [ [[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]], [[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]], [[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": _lowerCamelCase : Optional[Any] = torch.tensor( [ [[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]], [[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]], [[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]], [[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]], [[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]], [[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]], 
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": _lowerCamelCase : Dict = torch.tensor( [ [[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]], [[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]], [[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": _lowerCamelCase : Optional[int] = torch.tensor( [ [[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]], [[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]], [[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [ [-1.13_72e01, -1.27_87e01, -1.34_77e01], [-1.25_36e01, -1.41_94e01, -1.44_09e01], [-1.32_17e01, -1.48_88e01, -1.53_27e01], ], [ [-1.47_91e01, -1.71_22e01, -1.82_77e01], [-1.71_63e01, -1.91_92e01, -1.95_33e01], [-1.78_97e01, -1.99_91e01, -2.03_15e01], ], [ [7.67_23e-01, 4.19_21e-01, -7.78_78e-02], [4.77_72e-01, 9.55_57e-03, -2.80_82e-01], [3.60_32e-01, -2.48_26e-01, -5.11_68e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": _lowerCamelCase : Union[str, Any] = torch.tensor( [ [[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]], [[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]], [[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": _lowerCamelCase : List[Any] = torch.tensor( [ [[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]], [[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]], [[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]], [[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]], [[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": _lowerCamelCase : Any = torch.tensor( [ [[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]], [[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]], [[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]], ] ) 
elif model_name == "segformer.b4.1024x1024.city.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]], [[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]], [[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": _lowerCamelCase : str = torch.tensor( [ [[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]], [[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]], [[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]], ] ) else: _lowerCamelCase : Dict = logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-2 ) # finally, save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) image_processor.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''segformer.b0.512x512.ade.160k''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _lowerCAmelCase : str = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
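The renaming pass above is a chain of string rewrites from the original mmseg checkpoint layout to the HF layout (1-based block/embed indices become 0-based module indices). A minimal standalone illustration of one such rule:

# Sketch of a single rename rule from the conversion above (stdlib only).
key = "backbone.patch_embed1.proj.weight"
key = key.replace("backbone", "segformer.encoder")
idx = key[key.find("patch_embed") + len("patch_embed")]
key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
print(key)  # segformer.encoder.patch_embeddings.0.proj.weight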
340
1
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class A_ ( _a ): lowerCAmelCase__ = 42 lowerCAmelCase__ = None def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=0.9_9_9 , _lowerCamelCase="cosine" , ) -> List[str]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(_lowerCamelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_lowerCamelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) _lowerCamelCase : str = [] for i in range(_lowerCamelCase ): _lowerCamelCase : Any = i / num_diffusion_timesteps _lowerCamelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) ) return torch.tensor(_lowerCamelCase , dtype=torch.floataa ) class A_ ( _a , _a ): @register_to_config def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,): '''simple docstring''' if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) _lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = 1.0 - self.betas _lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 ) _lowerCamelCase : int = torch.tensor(1.0 ) # standard deviation of the initial noise distribution _lowerCamelCase : Tuple = 1.0 # setable values _lowerCamelCase : List[Any] = None _lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() ) _lowerCamelCase : List[str] = variance_type def _lowercase ( self: Any ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ): '''simple docstring''' return sample def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, torch.device] = None ): '''simple docstring''' _lowerCamelCase : str = num_inference_steps _lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) _lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) _lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: str=None ): '''simple docstring''' if prev_timestep is None: _lowerCamelCase : List[str] = t - 1 _lowerCamelCase : Optional[int] = self.alphas_cumprod[t] _lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : List[Any] = self.betas[t] else: _lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from 
https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample _lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: _lowerCamelCase : List[str] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": _lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) ) _lowerCamelCase : str = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler _lowerCamelCase : str = variance.log() _lowerCamelCase : str = beta.log() _lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2 _lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: bool = True ,): '''simple docstring''' _lowerCamelCase : str = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": _lowerCamelCase, _lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 ) else: _lowerCamelCase : List[Any] = None # 1. compute alphas, betas if prev_timestep is None: _lowerCamelCase : List[Any] = t - 1 _lowerCamelCase : Dict = self.alphas_cumprod[t] _lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : List[str] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : Any = self.betas[t] _lowerCamelCase : str = self.alphas[t] else: _lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev _lowerCamelCase : Optional[Any] = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": _lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": _lowerCamelCase : List[Any] = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: _lowerCamelCase : Any = torch.clamp( __lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t _lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _lowerCamelCase : Union[str, Any] = 0 if t > 0: _lowerCamelCase : Dict = randn_tensor( model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device ) _lowerCamelCase : Any = self._get_variance( __lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,) if self.variance_type == "fixed_small_log": _lowerCamelCase : Optional[Any] = variance elif self.variance_type == "learned_range": _lowerCamelCase : Optional[int] = (0.5 * variance).exp() else: raise ValueError( F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" " for the UnCLIPScheduler." ) _lowerCamelCase : Dict = variance * variance_noise _lowerCamelCase : List[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.IntTensor ,): '''simple docstring''' _lowerCamelCase : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype ) _lowerCamelCase : Any = timesteps.to(original_samples.device ) _lowerCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5 _lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : int = sqrt_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5 _lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
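The add_noise method at the end implements the standard DDPM forward-noising rule x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps. A standalone sketch (a linear beta schedule stands in for the cosine schedule this scheduler actually uses):

import torch

alphas_cumprod = torch.cumprod(1.0 - torch.linspace(1e-4, 2e-2, 1000), dim=0)
x0 = torch.randn(2, 3, 8, 8)   # clean samples
eps = torch.randn_like(x0)     # Gaussian noise
t = torch.tensor([10, 500])    # one timestep per sample
a = alphas_cumprod[t].sqrt().view(-1, 1, 1, 1)
b = (1.0 - alphas_cumprod[t]).sqrt().view(-1, 1, 1, 1)
x_t = a * x0 + b * eps         # same broadcasting the unsqueeze loop above performs
print(x_t.shape)  # torch.Size([2, 3, 8, 8])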
340
"""simple docstring""" _lowerCAmelCase : dict[tuple[int, int, int], int] = {} def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on _lowerCamelCase : Optional[int] = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one _lowerCamelCase : int = _calculate(days - 1 , _lowerCamelCase , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 _lowerCamelCase : Tuple = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter _lowerCamelCase : str = _calculate(days - 1 , _lowerCamelCase , 0 ) _lowerCamelCase : List[Any] = state_late + state_absent + state_ontime _lowerCamelCase : int = prizestrings return prizestrings def lowerCamelCase_( _lowerCamelCase = 30 ) -> int: '''simple docstring''' return _calculate(_lowerCamelCase , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
340
1
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class A_ ( unittest.TestCase ): def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : str = tempfile.mkdtemp() # fmt: off _lowerCamelCase : Optional[Any] = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on _lowerCamelCase : List[Any] = dict(zip(__lowerCAmelCase ,range(len(__lowerCAmelCase ) ) ) ) _lowerCamelCase : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] _lowerCamelCase : Dict = {"unk_token": "<unk>"} _lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] ) _lowerCamelCase : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCAmelCase ) + "\n" ) with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp: fp.write("\n".join(__lowerCAmelCase ) ) _lowerCamelCase : Optional[int] = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], "image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } _lowerCamelCase : str = os.path.join(self.tmpdirname ,__lowerCAmelCase ) with open(self.image_processor_file ,"w" ,encoding="utf-8" ) as fp: json.dump(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: Optional[Any] ,**__lowerCAmelCase: Any ): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname ,pad_token="!" ,**__lowerCAmelCase ) def _lowercase ( self: Optional[int] ,**__lowerCAmelCase: Tuple ): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,pad_token="!" 
,**__lowerCAmelCase ) def _lowercase ( self: Any ,**__lowerCAmelCase: Dict ): '''simple docstring''' return OwlViTImageProcessor.from_pretrained(self.tmpdirname ,**__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Optional[Any] = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] _lowerCamelCase : List[Any] = [Image.fromarray(np.moveaxis(__lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : int = self.get_rust_tokenizer() _lowerCamelCase : List[str] = self.get_image_processor() _lowerCamelCase : str = OwlViTProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) _lowerCamelCase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname ,use_fast=__lowerCAmelCase ) _lowerCamelCase : str = OwlViTProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) _lowerCamelCase : List[str] = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,__lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer ,__lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,__lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor ,__lowerCAmelCase ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : int = OwlViTProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _lowerCamelCase : int = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" ) _lowerCamelCase : Tuple = self.get_image_processor(do_normalize=__lowerCAmelCase ) _lowerCamelCase : List[str] = OwlViTProcessor.from_pretrained( self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=__lowerCAmelCase ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__lowerCAmelCase ) def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : Optional[int] = self.get_image_processor() _lowerCamelCase : Any = self.get_tokenizer() _lowerCamelCase : Any = OwlViTProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) _lowerCamelCase : Tuple = self.prepare_image_inputs() _lowerCamelCase : Optional[int] = image_processor(__lowerCAmelCase ,return_tensors="np" ) _lowerCamelCase : List[Any] = processor(images=__lowerCAmelCase ,return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : 
List[Any] = self.get_image_processor() _lowerCamelCase : Union[str, Any] = self.get_tokenizer() _lowerCamelCase : List[str] = OwlViTProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) _lowerCamelCase : List[Any] = "lower newer" _lowerCamelCase : Tuple = processor(text=__lowerCAmelCase ,return_tensors="np" ) _lowerCamelCase : Tuple = tokenizer(__lowerCAmelCase ,return_tensors="np" ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() ,encoded_processor[key][0].tolist() ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Optional[int] = self.get_image_processor() _lowerCamelCase : Union[str, Any] = self.get_tokenizer() _lowerCamelCase : Optional[Any] = OwlViTProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) _lowerCamelCase : List[Any] = "lower newer" _lowerCamelCase : List[Any] = self.prepare_image_inputs() _lowerCamelCase : str = processor(text=__lowerCAmelCase ,images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Optional[Any] = "google/owlvit-base-patch32" _lowerCamelCase : Dict = OwlViTProcessor.from_pretrained(__lowerCAmelCase ) _lowerCamelCase : str = ["cat", "nasa badge"] _lowerCamelCase : str = processor(text=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = 16 self.assertListEqual(list(inputs.keys() ) ,["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape ,(2, seq_length) ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Optional[int] = "google/owlvit-base-patch32" _lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = [["cat", "nasa badge"], ["person"]] _lowerCamelCase : str = processor(text=__lowerCAmelCase ) _lowerCamelCase : List[str] = 16 _lowerCamelCase : List[Any] = len(__lowerCAmelCase ) _lowerCamelCase : Dict = max([len(__lowerCAmelCase ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) ,["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape ,(batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = "google/owlvit-base-patch32" _lowerCamelCase : List[Any] = OwlViTProcessor.from_pretrained(__lowerCAmelCase ) _lowerCamelCase : str = ["cat", "nasa badge"] _lowerCamelCase : Any = processor(text=__lowerCAmelCase ) _lowerCamelCase : List[str] = 16 _lowerCamelCase : List[Any] = inputs["input_ids"] _lowerCamelCase : Dict = [ [49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) ,["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape ,(2, seq_length) ) self.assertListEqual(list(input_ids[0] ) ,predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) ,predicted_ids[1] ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Dict = self.get_image_processor() _lowerCamelCase : Dict = self.get_tokenizer() _lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__lowerCAmelCase 
,image_processor=__lowerCAmelCase ) _lowerCamelCase : str = self.prepare_image_inputs() _lowerCamelCase : Union[str, Any] = self.prepare_image_inputs() _lowerCamelCase : int = processor(images=__lowerCAmelCase ,query_images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,["query_pixel_values", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : Union[str, Any] = self.get_tokenizer() _lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) _lowerCamelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowerCamelCase : Dict = processor.batch_decode(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
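For context, a sketch of the batched-text path these tests assert (assumes network access to the public checkpoint named in the tests):

from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
inputs = processor(text=[["cat", "nasa badge"], ["person"]], return_tensors="np")
# Nested queries are flattened and padded: 2 samples x 2 max queries -> 4 rows.
print(inputs["input_ids"].shape)  # (4, 16)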
340
"""simple docstring""" from __future__ import annotations def lowerCamelCase_( _lowerCamelCase ) -> bool: '''simple docstring''' _lowerCamelCase : int = str(_lowerCamelCase ) return len(_lowerCamelCase ) == 9 and set(_lowerCamelCase ) == set("123456789" ) def lowerCamelCase_( ) -> int | None: '''simple docstring''' for base_num in range(9999 , 4999 , -1 ): _lowerCamelCase : Union[str, Any] = 100002 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate for base_num in range(333 , 99 , -1 ): _lowerCamelCase : Tuple = 1002003 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate return None if __name__ == "__main__": print(f'''{solution() = }''')
340
1
"""simple docstring""" import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class A_ : def __init__( self: List[Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Any=13 ,__lowerCAmelCase: Union[str, Any]=32 ,__lowerCAmelCase: List[Any]=2 ,__lowerCAmelCase: Optional[Any]=3 ,__lowerCAmelCase: List[str]=16 ,__lowerCAmelCase: Dict=[1, 2, 1] ,__lowerCAmelCase: List[str]=[2, 2, 4] ,__lowerCAmelCase: Dict=2 ,__lowerCAmelCase: Dict=2.0 ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: List[Any]=0.0 ,__lowerCAmelCase: Union[str, Any]=0.0 ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: Any="gelu" ,__lowerCAmelCase: int=False ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: Optional[int]=0.02 ,__lowerCAmelCase: List[Any]=1e-5 ,__lowerCAmelCase: str=True ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: Tuple=10 ,__lowerCAmelCase: Any=8 ,__lowerCAmelCase: str=["stage1", "stage2", "stage3"] ,__lowerCAmelCase: int=[1, 2, 3] ,): '''simple docstring''' _lowerCamelCase : str = parent _lowerCamelCase : Optional[int] = batch_size _lowerCamelCase : int = image_size _lowerCamelCase : List[str] = patch_size _lowerCamelCase : List[Any] = num_channels _lowerCamelCase : Any = embed_dim _lowerCamelCase : Any = depths _lowerCamelCase : List[Any] = num_heads _lowerCamelCase : str = window_size _lowerCamelCase : str = mlp_ratio _lowerCamelCase : int = qkv_bias _lowerCamelCase : Dict = hidden_dropout_prob _lowerCamelCase : Optional[Any] = attention_probs_dropout_prob _lowerCamelCase : int = drop_path_rate _lowerCamelCase : Union[str, Any] = hidden_act _lowerCamelCase : Union[str, Any] = use_absolute_embeddings _lowerCamelCase : str = patch_norm _lowerCamelCase : Tuple = layer_norm_eps _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : int = is_training _lowerCamelCase : Optional[int] = scope _lowerCamelCase : Optional[Any] = use_labels _lowerCamelCase : Union[str, Any] = type_sequence_label_size _lowerCamelCase : List[str] = encoder_stride _lowerCamelCase : str = out_features _lowerCamelCase : Any = out_indices def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCamelCase : Optional[Any] = None if self.use_labels: _lowerCamelCase : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) _lowerCamelCase : Union[str, Any] = self.get_config() return config, pixel_values, labels def _lowercase ( self: Optional[int] ): '''simple docstring''' return MaskFormerSwinConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob 
,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,) def _lowercase ( self: int ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Optional[Any] = MaskFormerSwinModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Any = model(__lowerCAmelCase ) _lowerCamelCase : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _lowerCamelCase : List[str] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Any ,__lowerCAmelCase: List[str] ): '''simple docstring''' _lowerCamelCase : List[Any] = MaskFormerSwinBackbone(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Optional[int] = model(__lowerCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) ) self.parent.assertListEqual(model.channels ,[16, 32, 64] ) # verify ValueError with self.parent.assertRaises(__lowerCAmelCase ): _lowerCamelCase : Any = ["stem"] _lowerCamelCase : str = MaskFormerSwinBackbone(config=__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Dict = self.prepare_config_and_inputs() _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = config_and_inputs _lowerCamelCase : Optional[int] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A_ ( _a , _a , unittest.TestCase ): lowerCAmelCase__ = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) lowerCAmelCase__ = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : Tuple = MaskFormerSwinModelTester(self ) _lowerCamelCase : Any = ConfigTester(self ,config_class=__lowerCAmelCase ,embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" ) ) def _lowercase ( self: str ): '''simple docstring''' pass def _lowercase ( self: str ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowercase ( self: List[Any] ): '''simple docstring''' return def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : str = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCAmelCase ) @unittest.skip("Swin does not use inputs_embeds" ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' pass @unittest.skip("Swin does not support feedforward chunking" ) def _lowercase ( self: Any ): '''simple docstring''' pass def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) _lowerCamelCase : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase ,nn.Linear ) ) def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : int = model_class(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Any = [*signature.parameters.keys()] _lowerCamelCase : List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] ,__lowerCAmelCase ) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" ) def _lowercase ( self: Optional[int] ): '''simple docstring''' pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" ) def _lowercase ( self: Tuple ): '''simple docstring''' pass def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) ) _lowerCamelCase : Optional[Any] = outputs.hidden_states _lowerCamelCase : List[str] = getattr( self.model_tester ,"expected_num_hidden_layers" ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase ) # Swin has a different seq_length _lowerCamelCase : Optional[int] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : int = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _lowerCamelCase : Any = True self.check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCamelCase : List[Any] 
= True self.check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : List[Any] = 3 _lowerCamelCase : Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _lowerCamelCase : Optional[Any] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowerCamelCase : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _lowerCamelCase : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _lowerCamelCase : List[Any] = True self.check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCamelCase : Tuple = True self.check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,(padded_height, padded_width) ) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" ) def _lowercase ( self: str ): '''simple docstring''' pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def _lowercase ( self: List[Any] ): '''simple docstring''' pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def _lowercase ( self: str ): '''simple docstring''' pass def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(__lowerCAmelCase: Tuple ): _lowerCamelCase : Optional[Any] = 0 return t def check_equivalence(__lowerCAmelCase: str ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Any={} ): with torch.no_grad(): _lowerCamelCase : Any = model(**__lowerCAmelCase ,return_dict=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : Any = model(**__lowerCAmelCase ,return_dict=__lowerCAmelCase ,**__lowerCAmelCase ).to_tuple() def recursive_check(__lowerCAmelCase: int ,__lowerCAmelCase: Tuple ): if isinstance(__lowerCAmelCase ,(List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__lowerCAmelCase ,__lowerCAmelCase ): recursive_check(__lowerCAmelCase ,__lowerCAmelCase ) elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() ,dict_object.values() ): recursive_check(__lowerCAmelCase ,__lowerCAmelCase ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(__lowerCAmelCase ) ,set_nan_tensor_to_zero(__lowerCAmelCase ) ,atol=1e-5 ) ,msg=( "Tuple and dict output are not equal. Difference:" F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" F""" {torch.isnan(__lowerCAmelCase ).any()} and `inf`: {torch.isinf(__lowerCAmelCase )}. 
Dict has""" F""" `nan`: {torch.isnan(__lowerCAmelCase ).any()} and `inf`: {torch.isinf(__lowerCAmelCase )}.""" ) ,) recursive_check(__lowerCAmelCase ,__lowerCAmelCase ) for model_class in self.all_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : str = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) check_equivalence(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ,return_labels=__lowerCAmelCase ) _lowerCamelCase : Dict = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ,return_labels=__lowerCAmelCase ) check_equivalence(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : List[str] = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) check_equivalence(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,{"output_hidden_states": True} ) _lowerCamelCase : Dict = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ,return_labels=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ,return_labels=__lowerCAmelCase ) check_equivalence(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,{"output_hidden_states": True} ) @require_torch class A_ ( unittest.TestCase , _a ): lowerCAmelCase__ = (MaskFormerSwinBackbone,) if is_torch_available() else () lowerCAmelCase__ = MaskFormerSwinConfig def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : str = MaskFormerSwinModelTester(self ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : Union[str, Any] = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: _lowerCamelCase : Union[str, Any] = backbone_class(__lowerCAmelCase ) backbone.to(__lowerCAmelCase ) backbone.eval() _lowerCamelCase : Optional[Any] = backbone(**__lowerCAmelCase ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps ,__lowerCAmelCase ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps ,backbone.channels ): self.assertTrue(feature_map.shape[:2] ,(batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True _lowerCamelCase : Optional[int] = backbone(**__lowerCAmelCase ,output_hidden_states=__lowerCAmelCase ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) ,len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] ,backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) ,(batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: _lowerCamelCase : List[Any] = backbone(**__lowerCAmelCase ,output_attentions=__lowerCAmelCase ) self.assertIsNotNone(outputs.attentions )
340
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class A_ ( _a ): lowerCAmelCase__ = 'char' lowerCAmelCase__ = 'bpe' lowerCAmelCase__ = 'wp' _lowerCAmelCase : List[str] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class A_ ( _a ): lowerCAmelCase__ = ['image_processor', 'char_tokenizer'] lowerCAmelCase__ = 'ViTImageProcessor' lowerCAmelCase__ = 'MgpstrTokenizer' def __init__( self: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Optional[int]=None ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Any = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." ,__lowerCAmelCase ,) _lowerCamelCase : List[Any] = kwargs.pop("feature_extractor" ) _lowerCamelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) _lowerCamelCase : List[str] = tokenizer _lowerCamelCase : str = AutoTokenizer.from_pretrained("gpt2" ) _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(__lowerCAmelCase ,__lowerCAmelCase ) def __call__( self: Optional[int] ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[Any]=None ,**__lowerCAmelCase: Tuple ): '''simple docstring''' if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." 
) if images is not None: _lowerCamelCase : Optional[int] = self.image_processor(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is not None: _lowerCamelCase : int = self.char_tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is None: return inputs elif images is None: return encodings else: _lowerCamelCase : Tuple = encodings["input_ids"] return inputs def _lowercase ( self: int ,__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = sequences _lowerCamelCase : Dict = char_preds.size(0 ) _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._decode_helper(__lowerCAmelCase ,"char" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = self._decode_helper(__lowerCAmelCase ,"bpe" ) _lowerCamelCase, _lowerCamelCase : Tuple = self._decode_helper(__lowerCAmelCase ,"wp" ) _lowerCamelCase : List[str] = [] _lowerCamelCase : str = [] for i in range(__lowerCAmelCase ): _lowerCamelCase : str = [char_scores[i], bpe_scores[i], wp_scores[i]] _lowerCamelCase : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]] _lowerCamelCase : Optional[Any] = scores.index(max(__lowerCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _lowerCamelCase : Tuple = {} _lowerCamelCase : Tuple = final_strs _lowerCamelCase : int = final_scores _lowerCamelCase : str = char_strs _lowerCamelCase : Dict = bpe_strs _lowerCamelCase : int = wp_strs return out def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ): '''simple docstring''' if format == DecodeType.CHARACTER: _lowerCamelCase : int = self.char_decode _lowerCamelCase : List[str] = 1 _lowerCamelCase : Optional[int] = "[s]" elif format == DecodeType.BPE: _lowerCamelCase : Dict = self.bpe_decode _lowerCamelCase : str = 2 _lowerCamelCase : Union[str, Any] = "#" elif format == DecodeType.WORDPIECE: _lowerCamelCase : int = self.wp_decode _lowerCamelCase : List[str] = 102 _lowerCamelCase : List[Any] = "[SEP]" else: raise ValueError(F"""Format {format} is not supported.""" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = [], [] _lowerCamelCase : Any = pred_logits.size(0 ) _lowerCamelCase : int = pred_logits.size(1 ) _lowerCamelCase, _lowerCamelCase : List[Any] = pred_logits.topk(1 ,dim=-1 ,largest=__lowerCAmelCase ,sorted=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_index.view(-1 ,__lowerCAmelCase )[:, 1:] _lowerCamelCase : List[str] = decoder(__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase : str = torch.nn.functional.softmax(__lowerCAmelCase ,dim=2 ).max(dim=2 ) _lowerCamelCase : Any = preds_max_prob[:, 1:] for index in range(__lowerCAmelCase ): _lowerCamelCase : List[Any] = preds_str[index].find(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_str[index][:pred_eos] _lowerCamelCase : Optional[Any] = preds_index[index].cpu().tolist() _lowerCamelCase : List[str] = pred_index.index(__lowerCAmelCase ) if eos_token in pred_index else -1 _lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1] _lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__lowerCAmelCase ) conf_scores.append(__lowerCAmelCase ) return dec_strs, conf_scores def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : str = [seq.replace(" " ,"" ) for seq in self.char_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs def _lowercase ( 
self: List[str] ,__lowerCAmelCase: List[str] ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(__lowerCAmelCase ) def _lowercase ( self: Tuple ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = [seq.replace(" " ,"" ) for seq in self.wp_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs
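The selection rule in batch_decode above reduces to a per-sample argmax over the three heads' confidence scores; a tiny standalone sketch:

# One sample, three hypotheses: (string, cumulative confidence) per head.
char_out, bpe_out, wp_out = ("ticket", 0.91), ("t1cket", 0.73), ("ticker", 0.64)
strs = [char_out[0], bpe_out[0], wp_out[0]]
scores = [char_out[1], bpe_out[1], wp_out[1]]
best = scores.index(max(scores))
print(strs[best], scores[best])  # ticket 0.91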
340
1
"""simple docstring""" import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def lowerCamelCase_( ) -> Dict: '''simple docstring''' _lowerCamelCase : Dict = argparse.ArgumentParser() parser.add_argument( "-m" , "--pretrained_model_name_or_path" , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models." , ) parser.add_argument( "-c" , "--caption" , type=_lowerCamelCase , default="robotic cat with wings" , help="Text used to generate images." , ) parser.add_argument( "-n" , "--images_num" , type=_lowerCamelCase , default=4 , help="How much images to generate." , ) parser.add_argument( "-s" , "--seed" , type=_lowerCamelCase , default=42 , help="Seed for random process." , ) parser.add_argument( "-ci" , "--cuda_id" , type=_lowerCamelCase , default=0 , help="cuda_id." , ) _lowerCamelCase : Any = parser.parse_args() return args def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]: '''simple docstring''' if not len(_lowerCamelCase ) == rows * cols: raise ValueError("The specified number of rows and columns are not correct." ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = imgs[0].size _lowerCamelCase : Optional[int] = Image.new("RGB" , size=(cols * w, rows * h) ) _lowerCamelCase, _lowerCamelCase : Dict = grid.size for i, img in enumerate(_lowerCamelCase ): grid.paste(_lowerCamelCase , box=(i % cols * w, i // cols * h) ) return grid def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase="robotic cat with wings" , _lowerCamelCase=7.5 , _lowerCamelCase=50 , _lowerCamelCase=1 , _lowerCamelCase=42 , ) -> Any: '''simple docstring''' _lowerCamelCase : List[str] = torch.Generator(pipeline.device ).manual_seed(_lowerCamelCase ) _lowerCamelCase : int = pipeline( _lowerCamelCase , guidance_scale=_lowerCamelCase , num_inference_steps=_lowerCamelCase , generator=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , ).images _lowerCamelCase : str = int(math.sqrt(_lowerCamelCase ) ) _lowerCamelCase : int = image_grid(_lowerCamelCase , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images _lowerCAmelCase : Tuple = parse_args() # Load models and create wrapper for stable diffusion _lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''') _lowerCAmelCase : Dict = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''') _lowerCAmelCase : Dict = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''') _lowerCAmelCase : List[Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''') _lowerCAmelCase : str = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) _lowerCAmelCase : Tuple = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')): _lowerCAmelCase : Optional[int] = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, '''unet''', unet) else: _lowerCAmelCase : int = unet.to(torch.device('''cuda''', args.cuda_id)) _lowerCAmelCase : str = pipeline.to(unet.device) 
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split())))) _lowerCAmelCase : int = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
340
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version _lowerCAmelCase : List[Any] = get_logger(__name__) class A_ : lowerCAmelCase__ = 'dummy_data' lowerCAmelCase__ = 'datasets' lowerCAmelCase__ = False def __init__( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[Version, str] ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[List[Callable]] = None ,): '''simple docstring''' _lowerCamelCase : str = 0 _lowerCamelCase : List[str] = dataset_name _lowerCamelCase : Optional[int] = cache_dir _lowerCamelCase : Optional[int] = use_local_dummy_data _lowerCamelCase : int = config # download_callbacks take a single url as input _lowerCamelCase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _lowerCamelCase : int = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _lowerCamelCase : Tuple = str(__lowerCAmelCase ) # to be downloaded _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Dict = None @property def _lowercase ( self: str ): '''simple docstring''' if self._dummy_file is None: _lowerCamelCase : List[str] = self.download_dummy_data() return self._dummy_file @property def _lowercase ( self: str ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" ,self.config.name ,self.version_name ) # structure is dummy / version_name return os.path.join("dummy" ,self.version_name ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' return os.path.join(self.dummy_data_folder ,"dummy_data.zip" ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Dict = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _lowerCamelCase : Optional[int] = cached_path( __lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=__lowerCAmelCase ,force_extract=__lowerCAmelCase ) return os.path.join(__lowerCAmelCase ,self.dummy_file_name ) @property def _lowercase ( self: Tuple ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file ) @property def _lowercase ( self: List[str] ): '''simple docstring''' if self._bucket_url is None: _lowerCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) ) return self._bucket_url @property def _lowercase ( self: Union[str, Any] ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _lowerCamelCase : Tuple = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _lowerCamelCase : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): return self.create_dummy_data_dict(__lowerCAmelCase ,__lowerCAmelCase ) elif isinstance(__lowerCAmelCase ,(list, tuple) ): return self.create_dummy_data_list(__lowerCAmelCase ,__lowerCAmelCase ) else: return self.create_dummy_data_single(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: Optional[int] ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' return path def _lowercase ( self: Optional[int] ): '''simple docstring''' return {} def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : str = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): for single_url in single_urls: download_callback(__lowerCAmelCase ) else: _lowerCamelCase : Union[str, Any] = single_urls download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : Dict = [os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) for x in single_urls] else: _lowerCamelCase : Union[str, Any] = single_urls _lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) _lowerCamelCase : List[Any] = value # make sure that values are unique if all(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Dict = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _lowerCamelCase : List[str] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,__lowerCAmelCase ) ) for url in data_url ) _lowerCamelCase : Optional[Any] = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _lowerCamelCase : Tuple = [data_url[0]] * len(__lowerCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the 
url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__lowerCAmelCase ) return dummy_data_list def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__lowerCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _lowercase ( self: Optional[Any] ): '''simple docstring''' pass def _lowercase ( self: Optional[int] ): '''simple docstring''' pass def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' def _iter_archive_members(__lowerCAmelCase: Any ): # this preserves the order of the members inside the ZIP archive _lowerCamelCase : Tuple = Path(self.dummy_file ).parent _lowerCamelCase : str = path.relative_to(__lowerCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _lowerCamelCase : Optional[int] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = Path(__lowerCAmelCase ) _lowerCamelCase : int = _iter_archive_members(__lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__lowerCAmelCase ).as_posix(), file_path.open("rb" ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : List[Any] = [paths] for path in paths: if os.path.isfile(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__lowerCAmelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
340
1
"""simple docstring""" from unittest.mock import Mock, patch from file_transfer.send_file import send_file @patch("socket.socket" ) @patch("builtins.open" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Dict = Mock() _lowerCamelCase : str = conn, Mock() _lowerCamelCase : int = iter([1, None] ) _lowerCamelCase : str = lambda _lowerCamelCase : next(_lowerCamelCase ) # ===== invoke ===== send_file(filename="mytext.txt" , testing=_lowerCamelCase ) # ===== ensurance ===== sock.assert_called_once() sock.return_value.bind.assert_called_once() sock.return_value.listen.assert_called_once() sock.return_value.accept.assert_called_once() conn.recv.assert_called_once() file.return_value.__enter__.assert_called_once() file.return_value.__enter__.return_value.read.assert_called() conn.send.assert_called_once() conn.close.assert_called_once() sock.return_value.shutdown.assert_called_once() sock.return_value.close.assert_called_once()
340
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise TypeError("Undefined for non-integers" ) elif precision < 1: raise ValueError("Undefined for non-natural numbers" ) _lowerCamelCase : int = precision _lowerCamelCase : Dict = ceil(precision / 14 ) _lowerCamelCase : Optional[Any] = 426880 * Decimal(10005 ).sqrt() _lowerCamelCase : int = 1 _lowerCamelCase : Optional[int] = 13591409 _lowerCamelCase : int = Decimal(_lowerCamelCase ) for k in range(1 , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowerCamelCase ) ** 3) linear_term += 545140134 exponential_term *= -262537412640768000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = 50 print(f'''The first {n} digits of pi is: {pi(n)}''')
340
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline _lowerCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name class A_ ( _a ): def __init__( self: List[str] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Tuple ): '''simple docstring''' super().__init__() self.register_modules(unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase ) @torch.no_grad() def __call__( self: List[Any] ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: int = 100 ,__lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,__lowerCAmelCase: Optional[float] = None ,__lowerCAmelCase: bool = True ,): '''simple docstring''' if audio_length_in_s is None: _lowerCamelCase : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate _lowerCamelCase : Tuple = audio_length_in_s * self.unet.config.sample_rate _lowerCamelCase : Optional[Any] = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( F"""{audio_length_in_s} is too small. Make sure it's bigger or equal to""" F""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" ) _lowerCamelCase : Dict = int(__lowerCAmelCase ) if sample_size % down_scale_factor != 0: _lowerCamelCase : Optional[int] = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( F"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled""" F""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising""" " process." ) _lowerCamelCase : Union[str, Any] = int(__lowerCAmelCase ) _lowerCamelCase : Dict = next(iter(self.unet.parameters() ) ).dtype _lowerCamelCase : int = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) _lowerCamelCase : str = randn_tensor(__lowerCAmelCase ,generator=__lowerCAmelCase ,device=self.device ,dtype=__lowerCAmelCase ) # set step values self.scheduler.set_timesteps(__lowerCAmelCase ,device=audio.device ) _lowerCamelCase : str = self.scheduler.timesteps.to(__lowerCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output _lowerCamelCase : Optional[int] = self.unet(__lowerCAmelCase ,__lowerCAmelCase ).sample # 2. compute previous image: x_t -> t_t-1 _lowerCamelCase : str = self.scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample _lowerCamelCase : Any = audio.clamp(-1 ,1 ).float().cpu().numpy() _lowerCamelCase : str = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=__lowerCAmelCase )
340
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class A_ ( _a ): lowerCAmelCase__ = 42 lowerCAmelCase__ = None def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=0.9_9_9 , _lowerCamelCase="cosine" , ) -> List[str]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(_lowerCamelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_lowerCamelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) _lowerCamelCase : str = [] for i in range(_lowerCamelCase ): _lowerCamelCase : Any = i / num_diffusion_timesteps _lowerCamelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) ) return torch.tensor(_lowerCamelCase , dtype=torch.floataa ) class A_ ( _a , _a ): @register_to_config def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,): '''simple docstring''' if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) _lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = 1.0 - self.betas _lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 ) _lowerCamelCase : int = torch.tensor(1.0 ) # standard deviation of the initial noise distribution _lowerCamelCase : Tuple = 1.0 # setable values _lowerCamelCase : List[Any] = None _lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() ) _lowerCamelCase : List[str] = variance_type def _lowercase ( self: Any ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ): '''simple docstring''' return sample def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, torch.device] = None ): '''simple docstring''' _lowerCamelCase : str = num_inference_steps _lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) _lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) _lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: str=None ): '''simple docstring''' if prev_timestep is None: _lowerCamelCase : List[str] = t - 1 _lowerCamelCase : Optional[int] = self.alphas_cumprod[t] _lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : List[Any] = self.betas[t] else: _lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from 
https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample _lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: _lowerCamelCase : List[str] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": _lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) ) _lowerCamelCase : str = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler _lowerCamelCase : str = variance.log() _lowerCamelCase : str = beta.log() _lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2 _lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: bool = True ,): '''simple docstring''' _lowerCamelCase : str = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": _lowerCamelCase, _lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 ) else: _lowerCamelCase : List[Any] = None # 1. compute alphas, betas if prev_timestep is None: _lowerCamelCase : List[Any] = t - 1 _lowerCamelCase : Dict = self.alphas_cumprod[t] _lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : List[str] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : Any = self.betas[t] _lowerCamelCase : str = self.alphas[t] else: _lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev _lowerCamelCase : Optional[Any] = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": _lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": _lowerCamelCase : List[Any] = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: _lowerCamelCase : Any = torch.clamp( __lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t _lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _lowerCamelCase : Union[str, Any] = 0 if t > 0: _lowerCamelCase : Dict = randn_tensor( model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device ) _lowerCamelCase : Any = self._get_variance( __lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,) if self.variance_type == "fixed_small_log": _lowerCamelCase : Optional[Any] = variance elif self.variance_type == "learned_range": _lowerCamelCase : Optional[int] = (0.5 * variance).exp() else: raise ValueError( F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" " for the UnCLIPScheduler." ) _lowerCamelCase : Dict = variance * variance_noise _lowerCamelCase : List[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.IntTensor ,): '''simple docstring''' _lowerCamelCase : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype ) _lowerCamelCase : Any = timesteps.to(original_samples.device ) _lowerCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5 _lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : int = sqrt_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5 _lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
340
1
"""simple docstring""" import re def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' if len(re.findall("[ATCG]" , _lowerCamelCase ) ) != len(_lowerCamelCase ): raise ValueError("Invalid Strand" ) return dna.translate(dna.maketrans("ATCG" , "TAGC" ) ) if __name__ == "__main__": import doctest doctest.testmod()
340
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Dict = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : str = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' for attribute in key.split("." ): _lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: _lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: _lowerCamelCase : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowerCamelCase : Tuple = value elif weight_type == "weight_g": _lowerCamelCase : List[str] = value elif weight_type == "weight_v": _lowerCamelCase : List[Any] = value elif weight_type == "bias": _lowerCamelCase : str = value elif weight_type == "running_mean": _lowerCamelCase : Optional[int] = value elif weight_type == "running_var": _lowerCamelCase : Optional[Any] = value elif weight_type == "num_batches_tracked": _lowerCamelCase : int = value elif weight_type == "inv_freq": _lowerCamelCase : List[str] = value else: _lowerCamelCase : Optional[Any] = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Dict = [] _lowerCamelCase : Optional[Any] = fairseq_model.state_dict() _lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): _lowerCamelCase : Dict = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , ) _lowerCamelCase : List[Any] = True else: for key, mapped_key in MAPPING.items(): _lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _lowerCamelCase : int = True if "*" in mapped_key: _lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2] _lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase ) if "pos_bias_u" in name: _lowerCamelCase : int = None elif "pos_bias_v" in name: _lowerCamelCase : Any = None elif "weight_g" in name: _lowerCamelCase : Any = "weight_g" elif "weight_v" in name: _lowerCamelCase : Any = "weight_v" elif "bias" in name: _lowerCamelCase : Optional[Any] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj _lowerCamelCase : Dict = "weight" elif "running_mean" in name: _lowerCamelCase : str = "running_mean" elif "inv_freq" in name: _lowerCamelCase : List[Any] = "inv_freq" elif "running_var" in name: _lowerCamelCase : Tuple = "running_var" elif "num_batches_tracked" in name: _lowerCamelCase : str = "num_batches_tracked" else: _lowerCamelCase : Dict = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : int = full_name.split("conv_layers." )[-1] _lowerCamelCase : List[Any] = name.split("." 
) _lowerCamelCase : Union[str, Any] = int(items[0] ) _lowerCamelCase : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowerCamelCase : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowerCamelCase : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) _lowerCamelCase : Dict = value logger.info(F"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowerCamelCase : Optional[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCamelCase ) @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Dict: '''simple docstring''' if config_path is not None: _lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" ) else: _lowerCamelCase : Dict = WavaVecaConformerConfig() if "rope" in checkpoint_path: _lowerCamelCase : List[Any] = "rotary" if is_finetuned: if dict_path: _lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowerCamelCase : Optional[int] = target_dict.pad_index _lowerCamelCase : Dict = target_dict.bos_index _lowerCamelCase : Optional[Any] = target_dict.eos_index _lowerCamelCase : str = len(target_dict.symbols ) _lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" ) if not os.path.isdir(_lowerCamelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) _lowerCamelCase : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched _lowerCamelCase : List[str] = 0 _lowerCamelCase : List[Any] = 1 with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , ) _lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False _lowerCamelCase :
Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) _lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) _lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase ) else: _lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase ) if is_finetuned: _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" ) _lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase ) _lowerCamelCase : Dict = model[0].eval() recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned ) hf_wavavec.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) _lowerCAmelCase : str = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
340
1
"""simple docstring""" from ..utils import DummyObject, requires_backends class A_ ( metaclass=_a ): lowerCAmelCase__ = ['torch', 'torchsde'] def __init__( self: List[str] ,*__lowerCAmelCase: Any ,**__lowerCAmelCase: Union[str, Any] ): '''simple docstring''' requires_backends(self ,["torch", "torchsde"] ) @classmethod def _lowercase ( cls: List[Any] ,*__lowerCAmelCase: int ,**__lowerCAmelCase: Union[str, Any] ): '''simple docstring''' requires_backends(cls ,["torch", "torchsde"] ) @classmethod def _lowercase ( cls: Optional[Any] ,*__lowerCAmelCase: Union[str, Any] ,**__lowerCAmelCase: Any ): '''simple docstring''' requires_backends(cls ,["torch", "torchsde"] )
340
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(a - b ) for a, b in zip(_lowerCamelCase , _lowerCamelCase ) ) ) def lowerCamelCase_( _lowerCamelCase ) -> None: '''simple docstring''' if point: if isinstance(_lowerCamelCase , _lowerCamelCase ): for item in point: if not isinstance(_lowerCamelCase , (int, float) ): _lowerCamelCase : Dict = ( "Expected a list of numbers as input, found " F"""{type(_lowerCamelCase ).__name__}""" ) raise TypeError(_lowerCamelCase ) else: _lowerCamelCase : Optional[int] = F"""Expected a list of numbers as input, found {type(_lowerCamelCase ).__name__}""" raise TypeError(_lowerCamelCase ) else: raise ValueError("Missing an input" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(x - y ) for x, y in zip(_lowerCamelCase , _lowerCamelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
340
1
"""simple docstring""" import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , ) -> int: '''simple docstring''' _lowerCamelCase : List[str] = {} if train_file is not None: _lowerCamelCase : Tuple = [train_file] if eval_file is not None: _lowerCamelCase : int = [eval_file] if test_file is not None: _lowerCamelCase : List[str] = [test_file] _lowerCamelCase : Union[str, Any] = datasets.load_dataset("csv" , data_files=_lowerCamelCase ) _lowerCamelCase : List[Any] = list(ds[list(files.keys() )[0]].features.keys() ) _lowerCamelCase : str = features_name.pop(_lowerCamelCase ) _lowerCamelCase : List[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) ) _lowerCamelCase : Union[str, Any] = {label: i for i, label in enumerate(_lowerCamelCase )} _lowerCamelCase : List[Any] = tokenizer.model_input_names _lowerCamelCase : List[str] = {} if len(_lowerCamelCase ) == 1: for k in files.keys(): _lowerCamelCase : Optional[int] = ds[k].map( lambda _lowerCamelCase : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) , batched=_lowerCamelCase , ) elif len(_lowerCamelCase ) == 2: for k in files.keys(): _lowerCamelCase : Union[str, Any] = ds[k].map( lambda _lowerCamelCase : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" , ) , batched=_lowerCamelCase , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: _lowerCamelCase : List[Any] = {k: v for k, v in ex.items() if k in input_names} _lowerCamelCase : Tuple = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: _lowerCamelCase : List[Any] = {k: v for k, v in ex.items() if k in input_names} _lowerCamelCase : str = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: _lowerCamelCase : Tuple = {k: v for k, v in ex.items() if k in input_names} _lowerCamelCase : int = labelaid[ex[label_name]] yield (d, label) _lowerCamelCase : Dict = ( tf.data.Dataset.from_generator( _lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: _lowerCamelCase : List[str] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) _lowerCamelCase : int = ( tf.data.Dataset.from_generator( _lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: _lowerCamelCase : str = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) _lowerCamelCase : Any = ( tf.data.Dataset.from_generator( _lowerCamelCase , ({k: 
tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: _lowerCamelCase : str = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid _lowerCAmelCase : List[Any] = logging.getLogger(__name__) @dataclass class A_ : lowerCAmelCase__ = field(metadata={'help': 'Which column contains the label'} ) lowerCAmelCase__ = field(default=_a , metadata={'help': 'The path of the training file'} ) lowerCAmelCase__ = field(default=_a , metadata={'help': 'The path of the development file'} ) lowerCAmelCase__ = field(default=_a , metadata={'help': 'The path of the test file'} ) lowerCAmelCase__ = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) lowerCAmelCase__ = field( default=_a , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class A_ : lowerCAmelCase__ = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) lowerCAmelCase__ = field( default=_a , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) lowerCAmelCase__ = field( default=_a , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) lowerCAmelCase__ = field(default=_a , metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. lowerCAmelCase__ = field( default=_a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) def lowerCamelCase_( ) -> Any: '''simple docstring''' _lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """ F"""16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_lowerCamelCase : str = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) _lowerCamelCase : int = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): _lowerCamelCase : str = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , ) def compute_metrics(_lowerCamelCase ) -> Dict: _lowerCamelCase : Dict = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer _lowerCamelCase : Any = TFTrainer( model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _lowerCamelCase : List[Any] = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) _lowerCamelCase : int = trainer.evaluate() _lowerCamelCase : List[Any] = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(_lowerCamelCase , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(F""" {key} = {value}""" ) writer.write(F"""{key} = {value}\n""" ) results.update(_lowerCamelCase ) return results if __name__ == "__main__": main()
340
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Optional[Any] = np.inf def set_batch_size(_lowerCamelCase ) -> None: nonlocal batch_size if isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Optional[int] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ) and feature.dtype == "binary": _lowerCamelCase : List[str] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_lowerCamelCase , _lowerCamelCase ) return None if batch_size is np.inf else batch_size class A_ ( _a ): def __init__( self: Optional[int] ,__lowerCAmelCase: NestedDataStructureLike[PathLike] ,__lowerCAmelCase: Optional[NamedSplit] = None ,__lowerCAmelCase: Optional[Features] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: int ,): '''simple docstring''' super().__init__( __lowerCAmelCase ,split=__lowerCAmelCase ,features=__lowerCAmelCase ,cache_dir=__lowerCAmelCase ,keep_in_memory=__lowerCAmelCase ,streaming=__lowerCAmelCase ,num_proc=__lowerCAmelCase ,**__lowerCAmelCase ,) _lowerCamelCase : Tuple = path_or_paths if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else {self.split: path_or_paths} _lowerCamelCase : Any = _PACKAGED_DATASETS_MODULES["parquet"][1] _lowerCamelCase : int = Parquet( cache_dir=__lowerCAmelCase ,data_files=__lowerCAmelCase ,features=__lowerCAmelCase ,hash=__lowerCAmelCase ,**__lowerCAmelCase ,) def _lowercase ( self: Optional[int] ): '''simple docstring''' if self.streaming: _lowerCamelCase : List[Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _lowerCamelCase : Tuple = None _lowerCamelCase : Optional[int] = None _lowerCamelCase : List[str] = None _lowerCamelCase : str = None self.builder.download_and_prepare( download_config=__lowerCAmelCase ,download_mode=__lowerCAmelCase ,verification_mode=__lowerCAmelCase ,base_path=__lowerCAmelCase ,num_proc=self.num_proc ,) _lowerCamelCase : Any = self.builder.as_dataset( split=self.split ,verification_mode=__lowerCAmelCase ,in_memory=self.keep_in_memory ) return dataset class A_ : def __init__( self: str ,__lowerCAmelCase: Dataset ,__lowerCAmelCase: Union[PathLike, BinaryIO] ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: List[Any] ,): '''simple docstring''' _lowerCamelCase : Any = dataset _lowerCamelCase : Any = path_or_buf _lowerCamelCase : Any = batch_size or get_writer_batch_size(dataset.features ) _lowerCamelCase : List[str] = parquet_writer_kwargs def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf ,(str, 
bytes, os.PathLike) ): with open(self.path_or_buf ,"wb+" ) as buffer: _lowerCamelCase : str = self._write(file_obj=__lowerCAmelCase ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) else: _lowerCamelCase : Optional[int] = self._write(file_obj=self.path_or_buf ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) return written def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: BinaryIO ,__lowerCAmelCase: int ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : List[str] = 0 _lowerCamelCase : Optional[int] = parquet_writer_kwargs.pop("path_or_buf" ,__lowerCAmelCase ) _lowerCamelCase : List[str] = self.dataset.features.arrow_schema _lowerCamelCase : str = pq.ParquetWriter(__lowerCAmelCase ,schema=__lowerCAmelCase ,**__lowerCAmelCase ) for offset in logging.tqdm( range(0 ,len(self.dataset ) ,__lowerCAmelCase ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating parquet from Arrow format" ,): _lowerCamelCase : List[str] = query_table( table=self.dataset._data ,key=slice(__lowerCAmelCase ,offset + batch_size ) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,) writer.write_table(__lowerCAmelCase ) written += batch.nbytes writer.close() return written
340
1
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder _lowerCAmelCase : Optional[int] = '''__DUMMY_TRANSFORMERS_USER__''' _lowerCAmelCase : Dict = '''Dummy User''' _lowerCAmelCase : Optional[int] = '''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''' _lowerCAmelCase : Tuple = '''https://hub-ci.huggingface.co''' _lowerCAmelCase : Any = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}''' _lowerCAmelCase : Tuple = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}''' _lowerCAmelCase : Dict = Path('''~/.huggingface/hub_ci_token''').expanduser() @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' monkeypatch.setattr("datasets.config.HF_ENDPOINT" , _lowerCamelCase ) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' HfFolder.save_token(_lowerCamelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="session" ) def lowerCamelCase_( ) -> str: '''simple docstring''' return HfApi(endpoint=_lowerCamelCase ) @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Dict = HfFolder.get_token() HfFolder.save_token(_lowerCamelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' def _cleanup_repo(_lowerCamelCase ): hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) return _cleanup_repo @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> List[str]: '''simple docstring''' @contextmanager def _temporary_repo(_lowerCamelCase ): try: yield repo_id finally: cleanup_repo(_lowerCamelCase ) return _temporary_repo @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Tuple = F"""repo_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[str] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data/text_data.txt" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : Dict = 
F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Any = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[Any] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
340
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Optional[int] = {} _lowerCamelCase : Optional[int] = tokenizer(example["content"] , truncation=_lowerCamelCase )["input_ids"] _lowerCamelCase : Dict = len(example["content"] ) / len(output["input_ids"] ) return output _lowerCAmelCase : Tuple = HfArgumentParser(PretokenizationArguments) _lowerCAmelCase : Optional[int] = parser.parse_args() if args.num_workers is None: _lowerCAmelCase : Any = multiprocessing.cpu_count() _lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir) _lowerCAmelCase : Union[str, Any] = time.time() _lowerCAmelCase : Optional[int] = load_dataset(args.dataset_name, split='''train''') print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') _lowerCAmelCase : Any = time.time() _lowerCAmelCase : Dict = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ '''repo_name''', '''path''', '''copies''', '''size''', '''content''', '''license''', '''hash''', '''line_mean''', '''line_max''', '''alpha_frac''', '''autogenerated''', ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') _lowerCAmelCase : str = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
340
1
"""simple docstring""" import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _lowerCAmelCase : Dict = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''') @require_sentencepiece @require_tokenizers class A_ ( _a , unittest.TestCase ): lowerCAmelCase__ = GPTSwaTokenizer lowerCAmelCase__ = False lowerCAmelCase__ = True lowerCAmelCase__ = False def _lowercase ( self: Optional[Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _lowerCamelCase : Dict = GPTSwaTokenizer(__lowerCAmelCase ,eos_token="<unk>" ,bos_token="<unk>" ,pad_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Any = "This is a test" _lowerCamelCase : List[str] = "This is a test" return input_text, output_text def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Tuple = "<s>" _lowerCamelCase : List[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) ,__lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) ,__lowerCAmelCase ) def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"<unk>" ) self.assertEqual(vocab_keys[1] ,"<s>" ) self.assertEqual(vocab_keys[-1] ,"j" ) self.assertEqual(len(__lowerCAmelCase ) ,2_000 ) def _lowercase ( self: Any ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size ,2_000 ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : str = GPTSwaTokenizer(__lowerCAmelCase ) _lowerCamelCase : Tuple = tokenizer.tokenize("This is a test" ) self.assertListEqual(__lowerCAmelCase ,["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,[465, 287, 265, 631, 842] ) _lowerCamelCase : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) # fmt: off self.assertListEqual( __lowerCAmelCase ,["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] ,) # fmt: on _lowerCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) self.assertListEqual( __lowerCAmelCase ,[262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] ,) _lowerCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(__lowerCAmelCase ) # fmt: off self.assertListEqual( __lowerCAmelCase ,["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] ) # fmt: on def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : Any = GPTSwaTokenizer(__lowerCAmelCase ) _lowerCamelCase : str = ["This is a test", "I was born in 92000, and this is falsé."] _lowerCamelCase : Dict = [ [465, 287, 265, 631, 842], [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(__lowerCAmelCase ,__lowerCAmelCase ): self.assertListEqual(tokenizer.encode_fast(__lowerCAmelCase ) ,__lowerCAmelCase ) # Test that decode_fast returns the input text for text, token_ids in zip(__lowerCAmelCase ,__lowerCAmelCase ): self.assertEqual(tokenizer.decode_fast(__lowerCAmelCase ) ,__lowerCAmelCase ) @slow def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : Dict = [ "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')", "Hey there, how are you doing this fine day?", "This is a text with a trailing spaces followed by a dot .", "Häj sväjs lillebrör! =)", "Det är inget fel på Mr. Cool", ] # fmt: off _lowerCamelCase : List[str] = {"input_ids": [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCAmelCase ,model_name="AI-Sweden/gpt-sw3-126m" ,sequences=__lowerCAmelCase ,)
340
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Optional[Any] = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
1
"""simple docstring""" import torch def lowerCamelCase_( ) -> List[str]: '''simple docstring''' if torch.cuda.is_available(): _lowerCamelCase : Any = torch.cuda.device_count() else: _lowerCamelCase : List[str] = 0 print(F"""Successfully ran on {num_gpus} GPUs""" ) if __name__ == "__main__": main()
340
"""simple docstring""" import logging from transformers.configuration_utils import PretrainedConfig _lowerCAmelCase : Optional[Any] = logging.getLogger(__name__) class A_ ( _a ): lowerCAmelCase__ = 'masked_bert' def __init__( self: Union[str, Any] ,__lowerCAmelCase: Dict=30_522 ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Dict=12 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: List[Any]="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Tuple=512 ,__lowerCAmelCase: str=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Union[str, Any]=1e-12 ,__lowerCAmelCase: Union[str, Any]=0 ,__lowerCAmelCase: List[Any]="topK" ,__lowerCAmelCase: Optional[Any]="constant" ,__lowerCAmelCase: Optional[Any]=0.0 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = hidden_size _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : Tuple = num_attention_heads _lowerCamelCase : Optional[Any] = hidden_act _lowerCamelCase : Optional[Any] = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCamelCase : str = max_position_embeddings _lowerCamelCase : List[str] = type_vocab_size _lowerCamelCase : Optional[int] = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps _lowerCamelCase : int = pruning_method _lowerCamelCase : str = mask_init _lowerCamelCase : List[Any] = mask_scale
340
1
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Optional[int] = 0 while num > 0: digit_sum += num % 10 num //= 10 return digit_sum def lowerCamelCase_( _lowerCamelCase = 100 ) -> int: '''simple docstring''' _lowerCamelCase : Dict = 1 _lowerCamelCase : List[Any] = 2 for i in range(2 , max_n + 1 ): _lowerCamelCase : List[Any] = pre_numerator _lowerCamelCase : Optional[Any] = 2 * i // 3 if i % 3 == 0 else 1 _lowerCamelCase : Optional[Any] = cur_numerator _lowerCamelCase : Tuple = e_cont * pre_numerator + temp return sum_digits(_lowerCamelCase ) if __name__ == "__main__": print(f'''{solution() = }''')
340
"""simple docstring""" import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model _lowerCAmelCase : str = '''0.12''' # assumed parallelism: 8 if is_torch_available(): import torch def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ) -> List[Any]: '''simple docstring''' if rng is None: _lowerCamelCase : Union[str, Any] = random.Random() _lowerCamelCase : Union[str, Any] = 1 for dim in shape: total_dims *= dim _lowerCamelCase : Optional[int] = [] for _ in range(_lowerCamelCase ): values.append(rng.randint(0 , vocab_size - 1 ) ) _lowerCamelCase : Union[str, Any] = np.array(_lowerCamelCase , dtype=jnp.intaa ).reshape(_lowerCamelCase ) return output def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : Optional[int] = ids_tensor(_lowerCamelCase , vocab_size=2 , rng=_lowerCamelCase ) # make sure that at least one token is attended to for each batch _lowerCamelCase : List[str] = 1 return attn_mask @require_flax class A_ : lowerCAmelCase__ = None lowerCAmelCase__ = () def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 _lowerCamelCase : List[str] = 2 _lowerCamelCase : str = inputs["input_ids"].shape[-1] // 2 _lowerCamelCase : Tuple = inputs["input_ids"][:max_batch_size, :sequence_length] _lowerCamelCase : Any = jnp.ones_like(__lowerCAmelCase ) _lowerCamelCase : List[Any] = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens _lowerCamelCase : Optional[Any] = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` _lowerCamelCase : List[str] = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = False _lowerCamelCase : Dict = max_length _lowerCamelCase : Tuple = 0 for model_class in self.all_generative_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning _lowerCamelCase : Any = getattr(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Dict = pt_model_class(__lowerCAmelCase ).eval() _lowerCamelCase : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCAmelCase ,flax_model.params ) _lowerCamelCase : int = flax_model.generate(__lowerCAmelCase ).sequences _lowerCamelCase : Optional[int] = pt_model.generate(torch.tensor(__lowerCAmelCase ,dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: _lowerCamelCase : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, 
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = self._get_input_ids_and_config() _lowerCamelCase : Union[str, Any] = False _lowerCamelCase : Union[str, Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = True _lowerCamelCase : Optional[int] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : int = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : int = False _lowerCamelCase : Optional[Any] = max_length _lowerCamelCase : Dict = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[str] = model_class(__lowerCAmelCase ) _lowerCamelCase : Dict = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = self._get_input_ids_and_config() _lowerCamelCase : Tuple = False _lowerCamelCase : Union[str, Any] = max_length _lowerCamelCase : List[str] = 2 _lowerCamelCase : Optional[int] = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : str = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() _lowerCamelCase : int = True _lowerCamelCase : List[Any] = max_length _lowerCamelCase : Optional[Any] = 0.8 _lowerCamelCase : Union[str, Any] = 10 _lowerCamelCase : List[str] = 0.3 _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : str = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Any = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : int = jit(model.generate ) _lowerCamelCase : Optional[int] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() 
,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[str] = max_length _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : Dict = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Any = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : Any = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() _lowerCamelCase : Dict = max_length _lowerCamelCase : List[Any] = 2 _lowerCamelCase : Tuple = 1 _lowerCamelCase : List[str] = 8 _lowerCamelCase : List[Any] = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : int = model_class(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : Optional[Any] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Tuple = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : Dict = False _lowerCamelCase : Any = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[Any] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : List[str] = 
attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : int = 2 _lowerCamelCase : int = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : Dict = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) @require_flax class A_ ( unittest.TestCase ): def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" ) _lowerCamelCase : Union[str, Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) _lowerCamelCase : Optional[Any] = "Hello world" _lowerCamelCase : str = tokenizer(__lowerCAmelCase ,return_tensors="np" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__lowerCAmelCase ,"do_samples" ): model.generate(__lowerCAmelCase ,do_samples=__lowerCAmelCase ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__lowerCAmelCase ,"foo" ): _lowerCamelCase : List[str] = {"foo": "bar"} model.generate(__lowerCAmelCase ,**__lowerCAmelCase )
340
1
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Optional[Any] = np.inf def set_batch_size(_lowerCamelCase ) -> None: nonlocal batch_size if isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Optional[int] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ) and feature.dtype == "binary": _lowerCamelCase : List[str] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_lowerCamelCase , _lowerCamelCase ) return None if batch_size is np.inf else batch_size class A_ ( _a ): def __init__( self: Optional[int] ,__lowerCAmelCase: NestedDataStructureLike[PathLike] ,__lowerCAmelCase: Optional[NamedSplit] = None ,__lowerCAmelCase: Optional[Features] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: int ,): '''simple docstring''' super().__init__( __lowerCAmelCase ,split=__lowerCAmelCase ,features=__lowerCAmelCase ,cache_dir=__lowerCAmelCase ,keep_in_memory=__lowerCAmelCase ,streaming=__lowerCAmelCase ,num_proc=__lowerCAmelCase ,**__lowerCAmelCase ,) _lowerCamelCase : Tuple = path_or_paths if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else {self.split: path_or_paths} _lowerCamelCase : Any = _PACKAGED_DATASETS_MODULES["parquet"][1] _lowerCamelCase : int = Parquet( cache_dir=__lowerCAmelCase ,data_files=__lowerCAmelCase ,features=__lowerCAmelCase ,hash=__lowerCAmelCase ,**__lowerCAmelCase ,) def _lowercase ( self: Optional[int] ): '''simple docstring''' if self.streaming: _lowerCamelCase : List[Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _lowerCamelCase : Tuple = None _lowerCamelCase : Optional[int] = None _lowerCamelCase : List[str] = None _lowerCamelCase : str = None self.builder.download_and_prepare( download_config=__lowerCAmelCase ,download_mode=__lowerCAmelCase ,verification_mode=__lowerCAmelCase ,base_path=__lowerCAmelCase ,num_proc=self.num_proc ,) _lowerCamelCase : Any = self.builder.as_dataset( split=self.split ,verification_mode=__lowerCAmelCase ,in_memory=self.keep_in_memory ) return dataset class A_ : def __init__( self: str ,__lowerCAmelCase: Dataset ,__lowerCAmelCase: Union[PathLike, BinaryIO] ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: List[Any] ,): '''simple docstring''' _lowerCamelCase : Any = dataset _lowerCamelCase : Any = path_or_buf _lowerCamelCase : Any = batch_size or get_writer_batch_size(dataset.features ) _lowerCamelCase : List[str] = parquet_writer_kwargs def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf ,(str, 
bytes, os.PathLike) ): with open(self.path_or_buf ,"wb+" ) as buffer: _lowerCamelCase : str = self._write(file_obj=__lowerCAmelCase ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) else: _lowerCamelCase : Optional[int] = self._write(file_obj=self.path_or_buf ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) return written def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: BinaryIO ,__lowerCAmelCase: int ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : List[str] = 0 _lowerCamelCase : Optional[int] = parquet_writer_kwargs.pop("path_or_buf" ,__lowerCAmelCase ) _lowerCamelCase : List[str] = self.dataset.features.arrow_schema _lowerCamelCase : str = pq.ParquetWriter(__lowerCAmelCase ,schema=__lowerCAmelCase ,**__lowerCAmelCase ) for offset in logging.tqdm( range(0 ,len(self.dataset ) ,__lowerCAmelCase ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating parquet from Arrow format" ,): _lowerCamelCase : List[str] = query_table( table=self.dataset._data ,key=slice(__lowerCAmelCase ,offset + batch_size ) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,) writer.write_table(__lowerCAmelCase ) written += batch.nbytes writer.close() return written
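# A minimal usage sketch for the reader/writer pair above, via the public
# `datasets` API (`Dataset.to_parquet` / `Dataset.from_parquet` delegate to
# these classes); the tiny dataset is made up for illustration.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})
ds.to_parquet("tiny.parquet")  # Arrow table -> Parquet file
round_tripped = Dataset.from_parquet("tiny.parquet")
assert round_tripped[0] == {"text": "a", "label": 0}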
340
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : int = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class A_ ( _a ): lowerCAmelCase__ = 'mobilenet_v1' def __init__( self: Tuple ,__lowerCAmelCase: int=3 ,__lowerCAmelCase: Dict=224 ,__lowerCAmelCase: int=1.0 ,__lowerCAmelCase: Tuple=8 ,__lowerCAmelCase: List[str]="relu6" ,__lowerCAmelCase: int=True ,__lowerCAmelCase: List[Any]=0.9_99 ,__lowerCAmelCase: Optional[int]=0.02 ,__lowerCAmelCase: Optional[int]=0.0_01 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(**__lowerCAmelCase ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) _lowerCamelCase : List[str] = num_channels _lowerCamelCase : Union[str, Any] = image_size _lowerCamelCase : List[Any] = depth_multiplier _lowerCamelCase : Any = min_depth _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Dict = tf_padding _lowerCamelCase : Union[str, Any] = classifier_dropout_prob _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps class A_ ( _a ): lowerCAmelCase__ = version.parse('1.11' ) @property def _lowercase ( self: Optional[int] ): '''simple docstring''' return OrderedDict([("pixel_values", {0: "batch"})] ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def _lowercase ( self: Any ): '''simple docstring''' return 1e-4
340
1
"""simple docstring""" import math def lowerCamelCase_( _lowerCamelCase ) -> bool: '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_lowerCamelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowerCamelCase_( _lowerCamelCase = 10001 ) -> int: '''simple docstring''' try: _lowerCamelCase : Dict = int(_lowerCamelCase ) except (TypeError, ValueError): raise TypeError("Parameter nth must be int or castable to int." ) from None if nth <= 0: raise ValueError("Parameter nth must be greater than or equal to one." ) _lowerCamelCase : list[int] = [] _lowerCamelCase : int = 2 while len(_lowerCamelCase ) < nth: if is_prime(_lowerCamelCase ): primes.append(_lowerCamelCase ) num += 1 else: num += 1 return primes[len(_lowerCamelCase ) - 1] if __name__ == "__main__": print(f'''{solution() = }''')
340
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def lowerCamelCase_( _lowerCamelCase ) -> Any: '''simple docstring''' for param in module.parameters(): _lowerCamelCase : Optional[int] = False def lowerCamelCase_( ) -> List[str]: '''simple docstring''' _lowerCamelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _lowerCamelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Dict = plt.imshow(_lowerCamelCase ) fig.axes.get_xaxis().set_visible(_lowerCamelCase ) fig.axes.get_yaxis().set_visible(_lowerCamelCase ) plt.show() def lowerCamelCase_( ) -> str: '''simple docstring''' _lowerCamelCase : Tuple = datetime.now() _lowerCamelCase : Tuple = current_time.strftime("%H:%M:%S" ) return timestamp
340
1
"""simple docstring""" import argparse _lowerCAmelCase : Tuple = '''docs/source/_static/js/custom.js''' def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' with open(_lowerCamelCase , encoding="utf-8" , newline="\n" ) as f: _lowerCamelCase : Optional[int] = f.readlines() _lowerCamelCase : Optional[Any] = 0 # First let's put the right version while not lines[index].startswith("const stableVersion =" ): index += 1 _lowerCamelCase : Optional[int] = F"""const stableVersion = \"v{version}\"\n""" # Then update the dictionary while not lines[index].startswith("const versionMapping = {" ): index += 1 # We go until the end while not lines[index].startswith("}" ): index += 1 # We add the new version at the end lines[index - 1] += F""" \"v{version}\": \"v{version}\",\n""" with open(_lowerCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('''--version''', help='''Release version.''') _lowerCAmelCase : Dict = parser.parse_args() update_custom_js(args.version)
340
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = ( first_str_length if first_str_length > second_str_length else second_str_length ) _lowerCamelCase : list = [] for char_count in range(_lowerCamelCase ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(_lowerCamelCase ) if __name__ == "__main__": print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
340
1
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase = 1 , _lowerCamelCase = 1000 ) -> int: '''simple docstring''' _lowerCamelCase : Union[str, Any] = 1 _lowerCamelCase : Optional[int] = 0 for divide_by_number in range(_lowerCamelCase , digit + 1 ): _lowerCamelCase : list[int] = [] _lowerCamelCase : Dict = numerator for _ in range(1 , digit + 1 ): if now_divide in has_been_divided: if longest_list_length < len(_lowerCamelCase ): _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : Tuple = divide_by_number else: has_been_divided.append(_lowerCamelCase ) _lowerCamelCase : Any = now_divide * 10 % divide_by_number return the_digit # Tests if __name__ == "__main__": import doctest doctest.testmod()
340
"""simple docstring""" _lowerCAmelCase : Tuple = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Any = [False] * len(_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = [s] _lowerCamelCase : str = True while queue: _lowerCamelCase : Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_lowerCamelCase ) _lowerCamelCase : Any = True _lowerCamelCase : Any = u return visited[t] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : List[str] = [-1] * (len(_lowerCamelCase )) _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : List[str] = [i[:] for i in graph] # Record original cut, copy. while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Any = float("Inf" ) _lowerCamelCase : Dict = sink while s != source: # Find the minimum value in select path _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , graph[parent[s]][s] ) _lowerCamelCase : Union[str, Any] = parent[s] max_flow += path_flow _lowerCamelCase : Optional[Any] = sink while v != source: _lowerCamelCase : Union[str, Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _lowerCamelCase : List[str] = parent[v] for i in range(len(_lowerCamelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
340
1
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class A_ ( _a ): @slow @require_torch def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : List[Any] = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" ,"prajjwal1/bert-tiny" ) _lowerCamelCase : List[Any] = BertTokenizer.from_pretrained("bert-base-uncased" ) _lowerCamelCase : List[str] = bertabert.config.encoder.vocab_size _lowerCamelCase : Any = tokenizer.sep_token_id _lowerCamelCase : List[Any] = tokenizer.cls_token_id _lowerCamelCase : List[str] = 128 _lowerCamelCase : int = datasets.load_dataset("cnn_dailymail" ,"3.0.0" ,split="train[:1%]" ) _lowerCamelCase : Optional[Any] = datasets.load_dataset("cnn_dailymail" ,"3.0.0" ,split="validation[:1%]" ) _lowerCamelCase : Any = train_dataset.select(range(32 ) ) _lowerCamelCase : int = val_dataset.select(range(16 ) ) _lowerCamelCase : Optional[Any] = 4 def _map_to_encoder_decoder_inputs(__lowerCAmelCase: List[str] ): # Tokenizer will automatically set [BOS] <text> [EOS] _lowerCamelCase : int = tokenizer(batch["article"] ,padding="max_length" ,truncation=__lowerCAmelCase ,max_length=512 ) _lowerCamelCase : Tuple = tokenizer(batch["highlights"] ,padding="max_length" ,truncation=__lowerCAmelCase ,max_length=128 ) _lowerCamelCase : List[Any] = inputs.input_ids _lowerCamelCase : Dict = inputs.attention_mask _lowerCamelCase : int = outputs.input_ids _lowerCamelCase : Optional[Any] = outputs.input_ids.copy() _lowerCamelCase : Tuple = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"] ] _lowerCamelCase : Tuple = outputs.attention_mask assert all(len(__lowerCAmelCase ) == 512 for x in inputs.input_ids ) assert all(len(__lowerCAmelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(__lowerCAmelCase: Tuple ): _lowerCamelCase : Tuple = pred.label_ids _lowerCamelCase : str = pred.predictions # all unnecessary tokens are removed _lowerCamelCase : str = tokenizer.batch_decode(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = tokenizer.batch_decode(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__lowerCAmelCase ) )] ) / len(__lowerCAmelCase ) return {"accuracy": accuracy} # map train dataset _lowerCamelCase : Optional[int] = train_dataset.map( _map_to_encoder_decoder_inputs ,batched=__lowerCAmelCase ,batch_size=__lowerCAmelCase ,remove_columns=["article", "highlights"] ,) train_dataset.set_format( type="torch" ,columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] ,) # same for validation dataset _lowerCamelCase : Dict = val_dataset.map( _map_to_encoder_decoder_inputs ,batched=__lowerCAmelCase ,batch_size=__lowerCAmelCase ,remove_columns=["article", "highlights"] ,) val_dataset.set_format( type="torch" ,columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] ,) _lowerCamelCase : Tuple = self.get_auto_remove_tmp_dir() _lowerCamelCase : int = SeqaSeqTrainingArguments( output_dir=__lowerCAmelCase ,per_device_train_batch_size=__lowerCAmelCase ,per_device_eval_batch_size=__lowerCAmelCase 
,predict_with_generate=__lowerCAmelCase ,evaluation_strategy="steps" ,do_train=__lowerCAmelCase ,do_eval=__lowerCAmelCase ,warmup_steps=0 ,eval_steps=2 ,logging_steps=2 ,) # instantiate trainer _lowerCamelCase : Union[str, Any] = SeqaSeqTrainer( model=__lowerCAmelCase ,args=__lowerCAmelCase ,compute_metrics=_compute_metrics ,train_dataset=__lowerCAmelCase ,eval_dataset=__lowerCAmelCase ,tokenizer=__lowerCAmelCase ,) # start training trainer.train()
340
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[str] = { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''', '''umberto-commoncrawl-cased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json''' ), '''umberto-wikipedia-uncased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json''' ), } class A_ ( _a ): lowerCAmelCase__ = 'camembert' def __init__( self: Tuple ,__lowerCAmelCase: Union[str, Any]=30_522 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Union[str, Any]=12 ,__lowerCAmelCase: int=12 ,__lowerCAmelCase: Optional[int]=3_072 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: int=512 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Any=1 ,__lowerCAmelCase: Any=0 ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Any="absolute" ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Tuple=None ,**__lowerCAmelCase: Dict ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[str] = vocab_size _lowerCamelCase : Any = hidden_size _lowerCamelCase : Union[str, Any] = num_hidden_layers _lowerCamelCase : str = num_attention_heads _lowerCamelCase : List[Any] = hidden_act _lowerCamelCase : int = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = max_position_embeddings _lowerCamelCase : str = type_vocab_size _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Union[str, Any] = layer_norm_eps _lowerCamelCase : Tuple = position_embedding_type _lowerCamelCase : List[Any] = use_cache _lowerCamelCase : Dict = classifier_dropout class A_ ( _a ): @property def _lowercase ( self: Any ): '''simple docstring''' if self.task == "multiple-choice": _lowerCamelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"} else: _lowerCamelCase : int = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
340
1
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def lowerCamelCase_( _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Optional[int] = FileLock(str(tmpdir / "foo.lock" ) ) _lowerCamelCase : List[str] = FileLock(str(tmpdir / "foo.lock" ) ) _lowerCamelCase : List[str] = 0.0_1 with locka.acquire(): with pytest.raises(_lowerCamelCase ): _lowerCamelCase : Dict = time.time() locka.acquire(_lowerCamelCase ) assert time.time() - _start > timeout def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : str = "a" * 1000 + ".lock" _lowerCamelCase : Optional[Any] = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(".lock" ) assert not locka._lock_file.endswith(_lowerCamelCase ) assert len(os.path.basename(locka._lock_file ) ) <= 255 _lowerCamelCase : Any = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(_lowerCamelCase ): locka.acquire(0 )
340
"""simple docstring""" from collections import defaultdict def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : str = True for v in tree[start]: if v not in visited: ret += dfs(_lowerCamelCase ) if ret % 2 == 0: cuts.append(_lowerCamelCase ) return ret def lowerCamelCase_( ) -> int: '''simple docstring''' dfs(1 ) if __name__ == "__main__": _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = 10, 9 _lowerCAmelCase : str = defaultdict(list) _lowerCAmelCase : dict[int, bool] = {} _lowerCAmelCase : list[int] = [] _lowerCAmelCase : Any = 0 _lowerCAmelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
340
1
"""simple docstring""" from ..utils import DummyObject, requires_backends class A_ ( metaclass=_a ): lowerCAmelCase__ = ['note_seq'] def __init__( self: Tuple ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' requires_backends(self ,["note_seq"] ) @classmethod def _lowercase ( cls: Dict ,*__lowerCAmelCase: int ,**__lowerCAmelCase: Union[str, Any] ): '''simple docstring''' requires_backends(cls ,["note_seq"] ) @classmethod def _lowercase ( cls: List[Any] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: List[str] ): '''simple docstring''' requires_backends(cls ,["note_seq"] )
340
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder _lowerCAmelCase : Optional[int] = '''__DUMMY_TRANSFORMERS_USER__''' _lowerCAmelCase : Dict = '''Dummy User''' _lowerCAmelCase : Optional[int] = '''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''' _lowerCAmelCase : Tuple = '''https://hub-ci.huggingface.co''' _lowerCAmelCase : Any = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}''' _lowerCAmelCase : Tuple = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}''' _lowerCAmelCase : Dict = Path('''~/.huggingface/hub_ci_token''').expanduser() @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' monkeypatch.setattr("datasets.config.HF_ENDPOINT" , _lowerCamelCase ) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' HfFolder.save_token(_lowerCamelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="session" ) def lowerCamelCase_( ) -> str: '''simple docstring''' return HfApi(endpoint=_lowerCamelCase ) @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Dict = HfFolder.get_token() HfFolder.save_token(_lowerCamelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' def _cleanup_repo(_lowerCamelCase ): hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) return _cleanup_repo @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> List[str]: '''simple docstring''' @contextmanager def _temporary_repo(_lowerCamelCase ): try: yield repo_id finally: cleanup_repo(_lowerCamelCase ) return _temporary_repo @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Tuple = F"""repo_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[str] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data/text_data.txt" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : Dict = 
F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Any = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[Any] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
340
1
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class A_ ( unittest.TestCase ): def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase : List[Any] = tempfile.mkdtemp() _lowerCamelCase : List[Any] = BlipImageProcessor() _lowerCamelCase : str = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" ) _lowerCamelCase : Tuple = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" ) _lowerCamelCase : Any = InstructBlipProcessor(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self: Tuple ,**__lowerCAmelCase: int ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname ,**__lowerCAmelCase ).tokenizer def _lowercase ( self: str ,**__lowerCAmelCase: Any ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname ,**__lowerCAmelCase ).image_processor def _lowercase ( self: List[Any] ,**__lowerCAmelCase: List[str] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname ,**__lowerCAmelCase ).qformer_tokenizer def _lowercase ( self: Dict ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : List[Any] = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] _lowerCamelCase : List[str] = [Image.fromarray(np.moveaxis(__lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : int = InstructBlipProcessor( tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ,qformer_tokenizer=self.get_qformer_tokenizer() ,) processor.save_pretrained(self.tmpdirname ) _lowerCamelCase : Any = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" ) _lowerCamelCase : int = self.get_image_processor(do_normalize=__lowerCAmelCase ,padding_value=1.0 ) _lowerCamelCase : Union[str, Any] = InstructBlipProcessor.from_pretrained( self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=__lowerCAmelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__lowerCAmelCase ) self.assertIsInstance(processor.qformer_tokenizer ,__lowerCAmelCase ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : str = self.get_image_processor() _lowerCamelCase : Optional[int] = self.get_tokenizer() _lowerCamelCase : str = self.get_qformer_tokenizer() _lowerCamelCase : Any = InstructBlipProcessor( tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ,qformer_tokenizer=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = self.prepare_image_inputs() _lowerCamelCase : str = image_processor(__lowerCAmelCase ,return_tensors="np" ) _lowerCamelCase : List[str] = processor(images=__lowerCAmelCase ,return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() 
,input_processor[key].sum() ,delta=1e-2 ) def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : List[Any] = self.get_image_processor() _lowerCamelCase : Optional[int] = self.get_tokenizer() _lowerCamelCase : Any = self.get_qformer_tokenizer() _lowerCamelCase : Union[str, Any] = InstructBlipProcessor( tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ,qformer_tokenizer=__lowerCAmelCase ) _lowerCamelCase : Any = "lower newer" _lowerCamelCase : Optional[int] = processor(text=__lowerCAmelCase ) _lowerCamelCase : Dict = tokenizer(__lowerCAmelCase ,return_token_type_ids=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = qformer_tokenizer(__lowerCAmelCase ,return_token_type_ids=__lowerCAmelCase ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] ,encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] ,encoded_processor["qformer_" + key] ) def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase : List[str] = self.get_image_processor() _lowerCamelCase : str = self.get_tokenizer() _lowerCamelCase : int = self.get_qformer_tokenizer() _lowerCamelCase : Optional[int] = InstructBlipProcessor( tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ,qformer_tokenizer=__lowerCAmelCase ) _lowerCamelCase : Dict = "lower newer" _lowerCamelCase : int = self.prepare_image_inputs() _lowerCamelCase : List[str] = processor(text=__lowerCAmelCase ,images=__lowerCAmelCase ) self.assertListEqual( list(inputs.keys() ) ,["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] ,) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : str = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : int = self.get_qformer_tokenizer() _lowerCamelCase : str = InstructBlipProcessor( tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ,qformer_tokenizer=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowerCamelCase : Dict = processor.batch_decode(__lowerCAmelCase ) _lowerCamelCase : Any = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : List[Any] = self.get_image_processor() _lowerCamelCase : List[Any] = self.get_tokenizer() _lowerCamelCase : Any = self.get_qformer_tokenizer() _lowerCamelCase : Dict = InstructBlipProcessor( tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ,qformer_tokenizer=__lowerCAmelCase ) _lowerCamelCase : Tuple = "lower newer" _lowerCamelCase : Any = self.prepare_image_inputs() _lowerCamelCase : Optional[int] = processor(text=__lowerCAmelCase ,images=__lowerCAmelCase ) self.assertListEqual( list(inputs.keys() ) ,["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] ,)
340
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) class A_ ( _a ): def __init__( self: List[Any] ,__lowerCAmelCase: Union[List[ControlNetModel], Tuple[ControlNetModel]] ): '''simple docstring''' super().__init__() _lowerCamelCase : Tuple = nn.ModuleList(__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Union[torch.Tensor, float, int] ,__lowerCAmelCase: torch.Tensor ,__lowerCAmelCase: List[torch.tensor] ,__lowerCAmelCase: List[float] ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[Dict[str, Any]] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(__lowerCAmelCase ,__lowerCAmelCase ,self.nets ) ): _lowerCamelCase, _lowerCamelCase : Union[str, Any] = controlnet( __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,) # merge samples if i == 0: _lowerCamelCase, _lowerCamelCase : Optional[Any] = down_samples, mid_sample else: _lowerCamelCase : Optional[int] = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__lowerCAmelCase ,__lowerCAmelCase ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Union[str, os.PathLike] ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Callable = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[str] = None ,): '''simple docstring''' _lowerCamelCase : List[Any] = 0 _lowerCamelCase : str = save_directory for controlnet in self.nets: controlnet.save_pretrained( __lowerCAmelCase ,is_main_process=__lowerCAmelCase ,save_function=__lowerCAmelCase ,safe_serialization=__lowerCAmelCase ,variant=__lowerCAmelCase ,) idx += 1 _lowerCamelCase : int = model_path_to_save + F"""_{idx}""" @classmethod def _lowercase ( cls: Any ,__lowerCAmelCase: Optional[Union[str, os.PathLike]] ,**__lowerCAmelCase: int ): '''simple docstring''' _lowerCamelCase : int = 0 _lowerCamelCase : str = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... _lowerCamelCase : Dict = pretrained_model_path while os.path.isdir(__lowerCAmelCase ): _lowerCamelCase : List[Any] = ControlNetModel.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase ) controlnets.append(__lowerCAmelCase ) idx += 1 _lowerCamelCase : Tuple = pretrained_model_path + F"""_{idx}""" logger.info(F"""{len(__lowerCAmelCase )} controlnets loaded from {pretrained_model_path}.""" ) if len(__lowerCAmelCase ) == 0: raise ValueError( F"""No ControlNets found under {os.path.dirname(__lowerCAmelCase )}. Expected at least {pretrained_model_path + '_0'}.""" ) return cls(__lowerCAmelCase )
340
1
"""simple docstring""" _lowerCAmelCase : dict[str, float] = { "km/h": 1.0, "m/s": 3.6, "mph": 1.609_344, "knot": 1.852, } _lowerCAmelCase : dict[str, float] = { "km/h": 1.0, "m/s": 0.277_777_778, "mph": 0.621_371_192, "knot": 0.539_956_803, } def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' if unit_to not in speed_chart or unit_from not in speed_chart_inverse: _lowerCamelCase : Union[str, Any] = ( F"""Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n""" F"""Valid values are: {', '.join(_lowerCamelCase )}""" ) raise ValueError(_lowerCamelCase ) return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 ) if __name__ == "__main__": import doctest doctest.testmod()
340
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : int = logging.get_logger(__name__) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]: '''simple docstring''' _lowerCamelCase : Tuple = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("head" ): _lowerCamelCase : Tuple = "segformer.encoder." + key if key.startswith("backbone" ): _lowerCamelCase : Any = key.replace("backbone" , "segformer.encoder" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _lowerCamelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )] _lowerCamelCase : int = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_lowerCamelCase )-1}""" ) if "norm" in key: _lowerCamelCase : Optional[Any] = key.replace("norm" , "layer_norm" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _lowerCamelCase : Dict = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )] _lowerCamelCase : Tuple = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_lowerCamelCase )-1}""" ) if "layer_norm1" in key: _lowerCamelCase : Union[str, Any] = key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: _lowerCamelCase : int = key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 _lowerCamelCase : Union[str, Any] = key[key.find("block" ) + len("block" )] _lowerCamelCase : Optional[Any] = key.replace(F"""block{idx}""" , F"""block.{int(_lowerCamelCase )-1}""" ) if "attn.q" in key: _lowerCamelCase : Optional[int] = key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: _lowerCamelCase : List[str] = key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: _lowerCamelCase : Tuple = key.replace("attn" , "attention.self" ) if "fc1" in key: _lowerCamelCase : Optional[Any] = key.replace("fc1" , "dense1" ) if "fc2" in key: _lowerCamelCase : Dict = key.replace("fc2" , "dense2" ) if "linear_pred" in key: _lowerCamelCase : int = key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: _lowerCamelCase : str = key.replace("linear_fuse.conv" , "linear_fuse" ) _lowerCamelCase : Optional[Any] = key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _lowerCamelCase : Union[str, Any] = key[key.find("linear_c" ) + len("linear_c" )] _lowerCamelCase : Optional[int] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_lowerCamelCase )-1}""" ) if key.startswith("head" ): _lowerCamelCase : List[str] = key.replace("head" , "classifier" ) _lowerCamelCase : Union[str, Any] = value return new_state_dict def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" ) _lowerCamelCase : Optional[Any] = 
state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict _lowerCamelCase : int = kv_weight[ : config.hidden_sizes[i], : ] _lowerCamelCase : int = kv_bias[: config.hidden_sizes[i]] _lowerCamelCase : Optional[int] = kv_weight[ config.hidden_sizes[i] :, : ] _lowerCamelCase : Optional[Any] = kv_bias[ config.hidden_sizes[i] : ] def lowerCamelCase_( ) -> Dict: '''simple docstring''' _lowerCamelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCamelCase : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return image @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : Any = SegformerConfig() _lowerCamelCase : int = False # set attributes based on model_name _lowerCamelCase : Any = "huggingface/label-files" if "segformer" in model_name: _lowerCamelCase : str = model_name[len("segformer." ) : len("segformer." ) + 2] if "ade" in model_name: _lowerCamelCase : str = 150 _lowerCamelCase : Dict = "ade20k-id2label.json" _lowerCamelCase : Dict = (1, 150, 128, 128) elif "city" in model_name: _lowerCamelCase : List[str] = 19 _lowerCamelCase : Tuple = "cityscapes-id2label.json" _lowerCamelCase : Tuple = (1, 19, 128, 128) else: raise ValueError(F"""Model {model_name} not supported""" ) elif "mit" in model_name: _lowerCamelCase : List[str] = True _lowerCamelCase : Tuple = model_name[4:6] _lowerCamelCase : Tuple = 1000 _lowerCamelCase : List[Any] = "imagenet-1k-id2label.json" _lowerCamelCase : List[Any] = (1, 1000) else: raise ValueError(F"""Model {model_name} not supported""" ) # set config attributes _lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _lowerCamelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase : Optional[Any] = idalabel _lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : int = 256 elif size == "b2": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : List[Any] = 768 _lowerCamelCase : Any = [3, 4, 6, 3] elif size == "b3": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : Union[str, Any] = 768 _lowerCamelCase : Optional[Any] = [3, 4, 18, 3] elif size == "b4": _lowerCamelCase : str = [64, 128, 320, 512] _lowerCamelCase : Optional[Any] = 768 _lowerCamelCase : Dict = [3, 8, 27, 3] elif size == "b5": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : Tuple = 768 _lowerCamelCase : Tuple = [3, 6, 40, 3] else: raise ValueError(F"""Size {size} not supported""" ) # load image processor (only resize + normalize) _lowerCamelCase : Dict = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase ) # prepare image _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Dict = image_processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values logger.info(F"""Converting model {model_name}...""" ) # load original state dict if encoder_only: _lowerCamelCase : Tuple = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) ) else: _lowerCamelCase : int = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )["state_dict"] # rename keys _lowerCamelCase : str = rename_keys(_lowerCamelCase , 
encoder_only=_lowerCamelCase ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(_lowerCamelCase , _lowerCamelCase ) # create HuggingFace model and load state dict if encoder_only: _lowerCamelCase : Tuple = False _lowerCamelCase : Optional[int] = SegformerForImageClassification(_lowerCamelCase ) else: _lowerCamelCase : List[str] = SegformerForSemanticSegmentation(_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) model.eval() # forward pass _lowerCamelCase : Any = model(_lowerCamelCase ) _lowerCamelCase : Dict = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": _lowerCamelCase : str = torch.tensor( [ [[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]], [[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]], [[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]], [[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]], [[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": _lowerCamelCase : int = torch.tensor( [ [[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]], [[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]], [[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": _lowerCamelCase : Optional[Any] = torch.tensor( [ [[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]], [[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]], [[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]], [[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]], [[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]], [[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]], 
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": _lowerCamelCase : Dict = torch.tensor( [ [[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]], [[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]], [[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": _lowerCamelCase : Optional[int] = torch.tensor( [ [[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]], [[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]], [[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [ [-1.13_72e01, -1.27_87e01, -1.34_77e01], [-1.25_36e01, -1.41_94e01, -1.44_09e01], [-1.32_17e01, -1.48_88e01, -1.53_27e01], ], [ [-1.47_91e01, -1.71_22e01, -1.82_77e01], [-1.71_63e01, -1.91_92e01, -1.95_33e01], [-1.78_97e01, -1.99_91e01, -2.03_15e01], ], [ [7.67_23e-01, 4.19_21e-01, -7.78_78e-02], [4.77_72e-01, 9.55_57e-03, -2.80_82e-01], [3.60_32e-01, -2.48_26e-01, -5.11_68e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": _lowerCamelCase : Union[str, Any] = torch.tensor( [ [[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]], [[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]], [[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": _lowerCamelCase : List[Any] = torch.tensor( [ [[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]], [[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]], [[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]], [[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]], [[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": _lowerCamelCase : Any = torch.tensor( [ [[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]], [[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]], [[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]], ] ) 
elif model_name == "segformer.b4.1024x1024.city.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]], [[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]], [[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": _lowerCamelCase : str = torch.tensor( [ [[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]], [[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]], [[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]], ] ) else: _lowerCamelCase : Dict = logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-2 ) # finally, save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) image_processor.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''segformer.b0.512x512.ade.160k''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _lowerCAmelCase : str = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
340
1
"""simple docstring""" import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings _lowerCAmelCase : Union[str, Any] = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class A_ ( _a ): lowerCAmelCase__ = field(default=_a , metadata={'help': 'Whether to use SortishSampler or not.'} ) lowerCAmelCase__ = field( default=_a , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} ) lowerCAmelCase__ = field( default=_a , metadata={ 'help': ( 'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default ' 'to the `max_length` value of the model configuration.' ) } , ) lowerCAmelCase__ = field( default=_a , metadata={ 'help': ( 'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default ' 'to the `num_beams` value of the model configuration.' ) } , ) lowerCAmelCase__ = field( default=_a , metadata={ 'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.' } , ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : List[Any] = super().to_dict() for k, v in d.items(): if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : Optional[int] = v.to_dict() return d
340
"""simple docstring""" _lowerCAmelCase : dict[tuple[int, int, int], int] = {} def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on _lowerCamelCase : Optional[int] = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one _lowerCamelCase : int = _calculate(days - 1 , _lowerCamelCase , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 _lowerCamelCase : Tuple = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter _lowerCamelCase : str = _calculate(days - 1 , _lowerCamelCase , 0 ) _lowerCamelCase : List[Any] = state_late + state_absent + state_ontime _lowerCamelCase : int = prizestrings return prizestrings def lowerCamelCase_( _lowerCamelCase = 30 ) -> int: '''simple docstring''' return _calculate(_lowerCamelCase , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
340
1
"""simple docstring""" import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin _lowerCAmelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right _lowerCAmelCase : List[Any] = 25_6047 _lowerCAmelCase : Optional[Any] = 25_6145 @require_sentencepiece @require_tokenizers class A_ ( _a , unittest.TestCase ): lowerCAmelCase__ = NllbTokenizer lowerCAmelCase__ = NllbTokenizerFast lowerCAmelCase__ = True lowerCAmelCase__ = True lowerCAmelCase__ = {} def _lowercase ( self: List[Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _lowerCamelCase : str = NllbTokenizer(__lowerCAmelCase ,keep_accents=__lowerCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Optional[Any] = NllbTokenizer(__lowerCAmelCase ,keep_accents=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = tokenizer.tokenize("This is a test" ) self.assertListEqual(__lowerCAmelCase ,["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,) _lowerCamelCase : int = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( __lowerCAmelCase ,[ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] ,) _lowerCamelCase : Tuple = tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) self.assertListEqual( __lowerCAmelCase ,[ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] ,) _lowerCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(__lowerCAmelCase ) self.assertListEqual( __lowerCAmelCase ,[ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] ,) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : List[str] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : int = tempfile.mkdtemp() _lowerCamelCase : int = tokenizer_r.save_pretrained(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = tokenizer_p.save_pretrained(__lowerCAmelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in 
tokenizer_r_files ) ) _lowerCamelCase : Optional[Any] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(__lowerCAmelCase ,__lowerCAmelCase ) # Checks everything loads correctly in the same way _lowerCamelCase : Union[str, Any] = tokenizer_r.from_pretrained(__lowerCAmelCase ) _lowerCamelCase : List[Any] = tokenizer_p.from_pretrained(__lowerCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase ,__lowerCAmelCase ) ) shutil.rmtree(__lowerCAmelCase ) # Save tokenizer rust, legacy_format=True _lowerCamelCase : Optional[Any] = tempfile.mkdtemp() _lowerCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(__lowerCAmelCase ,legacy_format=__lowerCAmelCase ) _lowerCamelCase : List[str] = tokenizer_p.save_pretrained(__lowerCAmelCase ) # Checks it save with the same files self.assertSequenceEqual(__lowerCAmelCase ,__lowerCAmelCase ) # Checks everything loads correctly in the same way _lowerCamelCase : List[Any] = tokenizer_r.from_pretrained(__lowerCAmelCase ) _lowerCamelCase : str = tokenizer_p.from_pretrained(__lowerCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase ,__lowerCAmelCase ) ) shutil.rmtree(__lowerCAmelCase ) # Save tokenizer rust, legacy_format=False _lowerCamelCase : List[str] = tempfile.mkdtemp() _lowerCamelCase : Optional[Any] = tokenizer_r.save_pretrained(__lowerCAmelCase ,legacy_format=__lowerCAmelCase ) _lowerCamelCase : int = tokenizer_p.save_pretrained(__lowerCAmelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _lowerCamelCase : Any = tokenizer_r.from_pretrained(__lowerCAmelCase ) _lowerCamelCase : str = tokenizer_p.from_pretrained(__lowerCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase ,__lowerCAmelCase ) ) shutil.rmtree(__lowerCAmelCase ) @require_torch def _lowercase ( self: Any ): '''simple docstring''' if not self.test_seqaseq: return _lowerCamelCase : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Longer text that will definitely require truncation. 
_lowerCamelCase : Tuple = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for" " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons" " will only worsen the violence and misery for millions of people.", ] _lowerCamelCase : Tuple = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al" " Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi" " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] try: _lowerCamelCase : int = tokenizer.prepare_seqaseq_batch( src_texts=__lowerCAmelCase ,tgt_texts=__lowerCAmelCase ,max_length=3 ,max_target_length=10 ,return_tensors="pt" ,src_lang="eng_Latn" ,tgt_lang="ron_Latn" ,) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.labels.shape[1] ,10 ) # max_target_length will default to max_length if not specified _lowerCamelCase : int = tokenizer.prepare_seqaseq_batch( __lowerCAmelCase ,tgt_texts=__lowerCAmelCase ,max_length=3 ,return_tensors="pt" ) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.labels.shape[1] ,3 ) _lowerCamelCase : List[Any] = tokenizer.prepare_seqaseq_batch( src_texts=__lowerCAmelCase ,max_length=3 ,max_target_length=10 ,return_tensors="pt" ) self.assertEqual(batch_encoder_only.input_ids.shape[1] ,3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] ,3 ) self.assertNotIn("decoder_input_ids" ,__lowerCAmelCase ) @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." 
) def _lowercase ( self: int ): '''simple docstring''' pass def _lowercase ( self: Dict ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _lowerCamelCase : str = [AddedToken("<special>" ,lstrip=__lowerCAmelCase )] _lowerCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained( __lowerCAmelCase ,additional_special_tokens=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : Tuple = tokenizer_r.encode("Hey this is a <special> token" ) _lowerCamelCase : Any = tokenizer_r.encode("<special>" ,add_special_tokens=__lowerCAmelCase )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: _lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained( __lowerCAmelCase ,additional_special_tokens=__lowerCAmelCase ,**__lowerCAmelCase ,) _lowerCamelCase : Dict = self.tokenizer_class.from_pretrained( __lowerCAmelCase ,additional_special_tokens=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : Dict = tokenizer_p.encode("Hey this is a <special> token" ) _lowerCamelCase : List[str] = tokenizer_cr.encode("Hey this is a <special> token" ) self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class A_ ( unittest.TestCase ): lowerCAmelCase__ = 'facebook/nllb-200-distilled-600M' lowerCAmelCase__ = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] lowerCAmelCase__ = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] lowerCAmelCase__ = [ 2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 8_1_6_5, 2_4_8_0_6_6, 1_4_7_3_4, 9_5_0, 1_1_3_5, 1_0_5_7_2_1, 3_5_7_3, 8_3, 2_7_3_5_2, 1_0_8, 4_9_4_8_6, 2, ] @classmethod def _lowercase ( cls: int ): '''simple docstring''' _lowerCamelCase : NllbTokenizer = NllbTokenizer.from_pretrained( cls.checkpoint_name ,src_lang="eng_Latn" ,tgt_lang="ron_Latn" ) _lowerCamelCase : str = 1 return cls def _lowercase ( self: Optional[Any] ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"] ,256_001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"] ,256_002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"] ,256_057 ) def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens ,__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' self.assertIn(__lowerCAmelCase ,self.tokenizer.all_special_ids ) # fmt: off _lowerCamelCase : Union[str, Any] = [RO_CODE, 4_254, 98_068, 112_923, 39_072, 3_909, 713, 102_767, 26, 17_314, 35_642, 14_683, 33_118, 2_022, 66_987, 2, 256_047] # fmt: on _lowerCamelCase : List[Any] = 
self.tokenizer.decode(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase ) _lowerCamelCase : Tuple = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase ) self.assertNotIn(self.tokenizer.eos_token ,__lowerCAmelCase ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : List[Any] = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] ,__lowerCAmelCase ) _lowerCamelCase : List[str] = 10 _lowerCamelCase : Dict = self.tokenizer(__lowerCAmelCase ,max_length=__lowerCAmelCase ,truncation=__lowerCAmelCase ).input_ids[0] self.assertEqual(ids[-1] ,2 ) self.assertEqual(ids[0] ,__lowerCAmelCase ) self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase ) def _lowercase ( self: Dict ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) ,[256_203, 3] ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : int = tempfile.mkdtemp() _lowerCamelCase : Optional[int] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(__lowerCAmelCase ) _lowerCamelCase : Any = NllbTokenizer.from_pretrained(__lowerCAmelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,__lowerCAmelCase ) @require_torch def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : str = self.tokenizer( self.src_text ,text_target=self.tgt_text ,padding=__lowerCAmelCase ,truncation=__lowerCAmelCase ,max_length=len(self.expected_src_tokens ) ,return_tensors="pt" ,) _lowerCamelCase : List[Any] = shift_tokens_right( batch["labels"] ,self.tokenizer.pad_token_id ,self.tokenizer.lang_code_to_id["ron_Latn"] ) self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase ) self.assertEqual((2, 15) ,batch.input_ids.shape ) self.assertEqual((2, 15) ,batch.attention_mask.shape ) _lowerCamelCase : Any = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens ,__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase ,batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Dict = self.tokenizer(self.src_text ,padding=__lowerCAmelCase ,truncation=__lowerCAmelCase ,max_length=3 ,return_tensors="pt" ) _lowerCamelCase : Union[str, Any] = self.tokenizer( text_target=self.tgt_text ,padding=__lowerCAmelCase ,truncation=__lowerCAmelCase ,max_length=10 ,return_tensors="pt" ) _lowerCamelCase : Union[str, Any] = targets["input_ids"] _lowerCamelCase : Tuple = shift_tokens_right( __lowerCAmelCase ,self.tokenizer.pad_token_id ,decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] ,) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.decoder_input_ids.shape[1] ,10 ) @require_torch def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : List[str] = self.tokenizer._build_translation_inputs( "A test" ,return_tensors="pt" ,src_lang="eng_Latn" ,tgt_lang="fra_Latn" ) self.assertEqual( nested_simplify(__lowerCAmelCase ) ,{ # A, test, EOS, en_XX "input_ids": [[256_047, 70, 7_356, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 256_057, } ,) @require_torch def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Tuple = True _lowerCamelCase : Optional[Any] = self.tokenizer( "UN Chief says there is no 
military solution in Syria" ,src_lang="eng_Latn" ,tgt_lang="fra_Latn" ) self.assertEqual( inputs.input_ids ,[16_297, 134_408, 25_653, 6_370, 248, 254, 103_929, 94_995, 108, 49_486, 2, 256_047] ) _lowerCamelCase : Any = False _lowerCamelCase : Any = self.tokenizer( "UN Chief says there is no military solution in Syria" ,src_lang="eng_Latn" ,tgt_lang="fra_Latn" ) self.assertEqual( inputs.input_ids ,[256_047, 16_297, 134_408, 25_653, 6_370, 248, 254, 103_929, 94_995, 108, 49_486, 2] )
340
"""simple docstring""" from __future__ import annotations def lowerCamelCase_( _lowerCamelCase ) -> bool: '''simple docstring''' _lowerCamelCase : int = str(_lowerCamelCase ) return len(_lowerCamelCase ) == 9 and set(_lowerCamelCase ) == set("123456789" ) def lowerCamelCase_( ) -> int | None: '''simple docstring''' for base_num in range(9999 , 4999 , -1 ): _lowerCamelCase : Union[str, Any] = 100002 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate for base_num in range(333 , 99 , -1 ): _lowerCamelCase : Tuple = 1002003 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate return None if __name__ == "__main__": print(f'''{solution() = }''')
340
1
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : str = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("module.cls_token", "vit.embeddings.cls_token"), ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("module.pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("module.norm.weight", "layernorm.weight"), ("module.norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _lowerCamelCase : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> int: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: _lowerCamelCase : List[str] = "" else: _lowerCamelCase : Optional[int] = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _lowerCamelCase : int = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" ) _lowerCamelCase : Any = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict _lowerCamelCase : List[Any] = in_proj_weight[ : config.hidden_size, : ] _lowerCamelCase : List[str] = in_proj_bias[: config.hidden_size] _lowerCamelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _lowerCamelCase : Any = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _lowerCamelCase : List[Any] = in_proj_weight[ -config.hidden_size :, : ] _lowerCamelCase : Dict = in_proj_bias[-config.hidden_size :] def lowerCamelCase_( _lowerCamelCase ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : Union[str, Any] = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(_lowerCamelCase , _lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : List[str] = [ "module.fc.fc1.weight", "module.fc.fc1.bias", "module.fc.bn1.weight", "module.fc.bn1.bias", "module.fc.bn1.running_mean", "module.fc.bn1.running_var", "module.fc.bn1.num_batches_tracked", "module.fc.fc2.weight", "module.fc.fc2.bias", "module.fc.bn2.weight", "module.fc.bn2.bias", "module.fc.bn2.running_mean", "module.fc.bn2.running_var", "module.fc.bn2.num_batches_tracked", "module.fc.fc3.weight", "module.fc.fc3.bias", ] for k in ignore_keys: state_dict.pop(_lowerCamelCase , _lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : str = dct.pop(_lowerCamelCase ) _lowerCamelCase : Tuple = val def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : str = ViTMSNConfig() _lowerCamelCase : List[Any] = 1000 _lowerCamelCase : List[str] = "datasets/huggingface/label-files" _lowerCamelCase : Union[str, Any] = "imagenet-1k-id2label.json" _lowerCamelCase : List[str] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase ) , "r" ) ) _lowerCamelCase : List[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase : Optional[int] = idalabel _lowerCamelCase : Dict = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: _lowerCamelCase : Union[str, Any] = 384 _lowerCamelCase : Tuple = 1536 _lowerCamelCase : Tuple = 6 elif "l16" in checkpoint_url: _lowerCamelCase : Dict = 1024 _lowerCamelCase : Dict = 4096 _lowerCamelCase : List[str] = 24 _lowerCamelCase : Any = 16 _lowerCamelCase : Optional[int] = 0.1 elif "b4" in checkpoint_url: _lowerCamelCase : List[str] = 4 elif "l7" in checkpoint_url: _lowerCamelCase : Optional[int] = 7 _lowerCamelCase : Tuple = 1024 _lowerCamelCase : Dict = 4096 _lowerCamelCase : Dict = 24 _lowerCamelCase : Dict = 16 _lowerCamelCase : List[Any] = 0.1 _lowerCamelCase : Union[str, Any] = ViTMSNModel(_lowerCamelCase ) _lowerCamelCase : str = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" )["target_encoder"] _lowerCamelCase : Optional[int] = ViTImageProcessor(size=config.image_size ) remove_projection_head(_lowerCamelCase ) _lowerCamelCase : str = create_rename_keys(_lowerCamelCase , base_model=_lowerCamelCase ) for src, dest in rename_keys: rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , base_model=_lowerCamelCase ) 
model.load_state_dict(_lowerCamelCase ) model.eval() _lowerCamelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCamelCase : List[str] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) _lowerCamelCase : Optional[Any] = ViTImageProcessor( size=config.image_size , image_mean=_lowerCamelCase , image_std=_lowerCamelCase ) _lowerCamelCase : Dict = image_processor(images=_lowerCamelCase , return_tensors="pt" ) # forward pass torch.manual_seed(2 ) _lowerCamelCase : str = model(**_lowerCamelCase ) _lowerCamelCase : List[Any] = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: _lowerCamelCase : List[Any] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] ) elif "b16" in checkpoint_url: _lowerCamelCase : List[Any] = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] ) elif "l16" in checkpoint_url: _lowerCamelCase : Optional[int] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] ) elif "b4" in checkpoint_url: _lowerCamelCase : int = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] ) else: _lowerCamelCase : int = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , _lowerCamelCase , atol=1e-4 ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowerCamelCase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) _lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
340
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class A_ ( _a ): lowerCAmelCase__ = 'char' lowerCAmelCase__ = 'bpe' lowerCAmelCase__ = 'wp' _lowerCAmelCase : List[str] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class A_ ( _a ): lowerCAmelCase__ = ['image_processor', 'char_tokenizer'] lowerCAmelCase__ = 'ViTImageProcessor' lowerCAmelCase__ = 'MgpstrTokenizer' def __init__( self: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Optional[int]=None ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Any = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." ,__lowerCAmelCase ,) _lowerCamelCase : List[Any] = kwargs.pop("feature_extractor" ) _lowerCamelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) _lowerCamelCase : List[str] = tokenizer _lowerCamelCase : str = AutoTokenizer.from_pretrained("gpt2" ) _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(__lowerCAmelCase ,__lowerCAmelCase ) def __call__( self: Optional[int] ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[Any]=None ,**__lowerCAmelCase: Tuple ): '''simple docstring''' if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." 
) if images is not None: _lowerCamelCase : Optional[int] = self.image_processor(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is not None: _lowerCamelCase : int = self.char_tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is None: return inputs elif images is None: return encodings else: _lowerCamelCase : Tuple = encodings["input_ids"] return inputs def _lowercase ( self: int ,__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = sequences _lowerCamelCase : Dict = char_preds.size(0 ) _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._decode_helper(__lowerCAmelCase ,"char" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = self._decode_helper(__lowerCAmelCase ,"bpe" ) _lowerCamelCase, _lowerCamelCase : Tuple = self._decode_helper(__lowerCAmelCase ,"wp" ) _lowerCamelCase : List[str] = [] _lowerCamelCase : str = [] for i in range(__lowerCAmelCase ): _lowerCamelCase : str = [char_scores[i], bpe_scores[i], wp_scores[i]] _lowerCamelCase : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]] _lowerCamelCase : Optional[Any] = scores.index(max(__lowerCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _lowerCamelCase : Tuple = {} _lowerCamelCase : Tuple = final_strs _lowerCamelCase : int = final_scores _lowerCamelCase : str = char_strs _lowerCamelCase : Dict = bpe_strs _lowerCamelCase : int = wp_strs return out def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ): '''simple docstring''' if format == DecodeType.CHARACTER: _lowerCamelCase : int = self.char_decode _lowerCamelCase : List[str] = 1 _lowerCamelCase : Optional[int] = "[s]" elif format == DecodeType.BPE: _lowerCamelCase : Dict = self.bpe_decode _lowerCamelCase : str = 2 _lowerCamelCase : Union[str, Any] = "#" elif format == DecodeType.WORDPIECE: _lowerCamelCase : int = self.wp_decode _lowerCamelCase : List[str] = 102 _lowerCamelCase : List[Any] = "[SEP]" else: raise ValueError(F"""Format {format} is not supported.""" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = [], [] _lowerCamelCase : Any = pred_logits.size(0 ) _lowerCamelCase : int = pred_logits.size(1 ) _lowerCamelCase, _lowerCamelCase : List[Any] = pred_logits.topk(1 ,dim=-1 ,largest=__lowerCAmelCase ,sorted=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_index.view(-1 ,__lowerCAmelCase )[:, 1:] _lowerCamelCase : List[str] = decoder(__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase : str = torch.nn.functional.softmax(__lowerCAmelCase ,dim=2 ).max(dim=2 ) _lowerCamelCase : Any = preds_max_prob[:, 1:] for index in range(__lowerCAmelCase ): _lowerCamelCase : List[Any] = preds_str[index].find(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_str[index][:pred_eos] _lowerCamelCase : Optional[Any] = preds_index[index].cpu().tolist() _lowerCamelCase : List[str] = pred_index.index(__lowerCAmelCase ) if eos_token in pred_index else -1 _lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1] _lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__lowerCAmelCase ) conf_scores.append(__lowerCAmelCase ) return dec_strs, conf_scores def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : str = [seq.replace(" " ,"" ) for seq in self.char_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs def _lowercase ( 
self: List[str] ,__lowerCAmelCase: List[str] ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(__lowerCAmelCase ) def _lowercase ( self: Tuple ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = [seq.replace(" " ,"" ) for seq in self.wp_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs
340
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available _lowerCAmelCase : List[Any] = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = ['''SpeechEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Tuple = ['''FlaxSpeechEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys _lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version _lowerCAmelCase : List[Any] = get_logger(__name__) class A_ : lowerCAmelCase__ = 'dummy_data' lowerCAmelCase__ = 'datasets' lowerCAmelCase__ = False def __init__( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[Version, str] ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[List[Callable]] = None ,): '''simple docstring''' _lowerCamelCase : str = 0 _lowerCamelCase : List[str] = dataset_name _lowerCamelCase : Optional[int] = cache_dir _lowerCamelCase : Optional[int] = use_local_dummy_data _lowerCamelCase : int = config # download_callbacks take a single url as input _lowerCamelCase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _lowerCamelCase : int = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _lowerCamelCase : Tuple = str(__lowerCAmelCase ) # to be downloaded _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Dict = None @property def _lowercase ( self: str ): '''simple docstring''' if self._dummy_file is None: _lowerCamelCase : List[str] = self.download_dummy_data() return self._dummy_file @property def _lowercase ( self: str ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" ,self.config.name ,self.version_name ) # structure is dummy / version_name return os.path.join("dummy" ,self.version_name ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' return os.path.join(self.dummy_data_folder ,"dummy_data.zip" ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Dict = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _lowerCamelCase : Optional[int] = cached_path( __lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=__lowerCAmelCase ,force_extract=__lowerCAmelCase ) return os.path.join(__lowerCAmelCase ,self.dummy_file_name ) @property def _lowercase ( self: Tuple ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file ) @property def _lowercase ( self: List[str] ): '''simple docstring''' if self._bucket_url is None: _lowerCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) ) return self._bucket_url @property def _lowercase ( self: Union[str, Any] ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _lowerCamelCase : Tuple = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _lowerCamelCase : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): return self.create_dummy_data_dict(__lowerCAmelCase ,__lowerCAmelCase ) elif isinstance(__lowerCAmelCase ,(list, tuple) ): return self.create_dummy_data_list(__lowerCAmelCase ,__lowerCAmelCase ) else: return self.create_dummy_data_single(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: Optional[int] ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' return path def _lowercase ( self: Optional[int] ): '''simple docstring''' return {} def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : str = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): for single_url in single_urls: download_callback(__lowerCAmelCase ) else: _lowerCamelCase : Union[str, Any] = single_urls download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : Dict = [os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) for x in single_urls] else: _lowerCamelCase : Union[str, Any] = single_urls _lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) _lowerCamelCase : List[Any] = value # make sure that values are unique if all(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Dict = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _lowerCamelCase : List[str] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,__lowerCAmelCase ) ) for url in data_url ) _lowerCamelCase : Optional[Any] = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _lowerCamelCase : Tuple = [data_url[0]] * len(__lowerCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the 
url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__lowerCAmelCase ) return dummy_data_list def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__lowerCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _lowercase ( self: Optional[Any] ): '''simple docstring''' pass def _lowercase ( self: Optional[int] ): '''simple docstring''' pass def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' def _iter_archive_members(__lowerCAmelCase: Any ): # this preserves the order of the members inside the ZIP archive _lowerCamelCase : Tuple = Path(self.dummy_file ).parent _lowerCamelCase : str = path.relative_to(__lowerCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _lowerCamelCase : Optional[int] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = Path(__lowerCAmelCase ) _lowerCamelCase : int = _iter_archive_members(__lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__lowerCAmelCase ).as_posix(), file_path.open("rb" ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : List[Any] = [paths] for path in paths: if os.path.isfile(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__lowerCAmelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
340
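The dummy-data manager above keeps only the last path segment of each URL and percent-encodes it, so query strings survive as valid file names. A self-contained sketch of that convention (the helper name is illustrative):

import os
import urllib.parse
from pathlib import Path


def dummy_path_for(url: str, root: str = "dummy_data") -> str:
    # Same rule as the create_dummy_data_* methods: final path component,
    # quote_plus-encoded so URL arguments cannot break the file name.
    return os.path.join(root, urllib.parse.quote_plus(Path(url).name))


print(dummy_path_for("https://host/data/train.txt?version=2"))
# dummy_data/train.txt%3Fversion%3D2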
1
"""simple docstring""" import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class A_ ( _a ): def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase : List[Any] = tempfile.mkdtemp() _lowerCamelCase : List[str] = 5 # Realm tok _lowerCamelCase : List[str] = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "test", "question", "this", "is", "the", "first", "second", "third", "fourth", "fifth", "record", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] _lowerCamelCase : int = os.path.join(self.tmpdirname ,"realm_tokenizer" ) os.makedirs(__lowerCAmelCase ,exist_ok=__lowerCAmelCase ) _lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) _lowerCamelCase : Any = os.path.join(self.tmpdirname ,"realm_block_records" ) os.makedirs(__lowerCAmelCase ,exist_ok=__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"realm_tokenizer" ) ) def _lowercase ( self: Dict ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : Optional[Any] = RealmConfig(num_block_records=self.num_block_records ) return config def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase : Dict = Dataset.from_dict( { "id": ["0", "1"], "question": ["foo", "bar"], "answers": [["Foo", "Bar"], ["Bar"]], } ) return dataset def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Any = np.array( [ b"This is the first record", b"This is the second record", b"This is the third record", b"This is the fourth record", b"This is the fifth record", b"This is a longer longer longer record", ] ,dtype=__lowerCAmelCase ,) return block_records def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : Tuple = RealmRetriever( block_records=self.get_dummy_block_records() ,tokenizer=self.get_tokenizer() ,) return retriever def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Dict = self.get_config() _lowerCamelCase : str = self.get_dummy_retriever() _lowerCamelCase : Union[str, Any] = retriever.tokenizer _lowerCamelCase : Dict = np.array([0, 3] ,dtype="long" ) _lowerCamelCase : List[Any] = tokenizer(["Test question"] ).input_ids _lowerCamelCase : Optional[int] = tokenizer( ["the fourth"] ,add_special_tokens=__lowerCAmelCase ,return_token_type_ids=__lowerCAmelCase ,return_attention_mask=__lowerCAmelCase ,).input_ids _lowerCamelCase : int = config.reader_seq_len _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = retriever( __lowerCAmelCase ,__lowerCAmelCase ,answer_ids=__lowerCAmelCase ,max_length=__lowerCAmelCase ,return_tensors="np" ) self.assertEqual(len(__lowerCAmelCase ) ,2 ) self.assertEqual(len(__lowerCAmelCase ) ,2 ) self.assertEqual(len(__lowerCAmelCase ) ,2 ) self.assertEqual(concat_inputs.input_ids.shape ,(2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape ,(2, 10) ) 
self.assertEqual(concat_inputs.token_type_ids.shape ,(2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape ,(2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) ,["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] ,) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) ,["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] ,) def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase : List[str] = self.get_config() _lowerCamelCase : Dict = self.get_dummy_retriever() _lowerCamelCase : Optional[Any] = retriever.tokenizer _lowerCamelCase : Tuple = np.array([0, 3, 5] ,dtype="long" ) _lowerCamelCase : Any = tokenizer(["Test question"] ).input_ids _lowerCamelCase : int = tokenizer( ["the fourth", "longer longer"] ,add_special_tokens=__lowerCAmelCase ,return_token_type_ids=__lowerCAmelCase ,return_attention_mask=__lowerCAmelCase ,).input_ids _lowerCamelCase : List[str] = config.reader_seq_len _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = retriever( __lowerCAmelCase ,__lowerCAmelCase ,answer_ids=__lowerCAmelCase ,max_length=__lowerCAmelCase ,return_tensors="np" ) self.assertEqual([False, True, True] ,__lowerCAmelCase ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] ,__lowerCAmelCase ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] ,__lowerCAmelCase ) def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : List[Any] = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname ,"realm_block_records" ) ) # Test local path _lowerCamelCase : Union[str, Any] = retriever.from_pretrained(os.path.join(self.tmpdirname ,"realm_block_records" ) ) self.assertEqual(retriever.block_records[0] ,b"This is the first record" ) # Test mocked remote path with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download: _lowerCamelCase : Any = os.path.join( os.path.join(self.tmpdirname ,"realm_block_records" ) ,_REALM_BLOCK_RECORDS_FILENAME ) _lowerCamelCase : Tuple = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" ) self.assertEqual(retriever.block_records[0] ,b"This is the first record" )
340
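The retriever test above hinges on indexing a NumPy array of byte-string blocks with the retrieved document ids. A reduced sketch of that lookup with made-up data:

import numpy as np

block_records = np.array(
    [b"This is the first record", b"This is the second record"], dtype=object
)
retrieved_block_ids = np.array([1, 0], dtype="long")
print(block_records[retrieved_block_ids])  # evidence blocks in retrieval order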
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise TypeError("Undefined for non-integers" ) elif precision < 1: raise ValueError("Undefined for non-natural numbers" ) _lowerCamelCase : int = precision _lowerCamelCase : Dict = ceil(precision / 14 ) _lowerCamelCase : Optional[Any] = 426880 * Decimal(10005 ).sqrt() _lowerCamelCase : int = 1 _lowerCamelCase : Optional[int] = 13591409 _lowerCamelCase : int = Decimal(_lowerCamelCase ) for k in range(1 , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowerCamelCase ) ** 3) linear_term += 545140134 exponential_term *= -262537412640768000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = 50 print(f'''The first {n} digits of pi is: {pi(n)}''')
340
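A quick sanity check for the Chudnovsky helper above. The `ceil(precision / 14)` iteration count reflects that each term contributes roughly 14 correct digits; with 10 digits of working precision the rounded final digit is trimmed, leaving the first nine digits of pi:

assert pi(10) == "3.14159265"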
1
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class A_ : def __init__( self: int ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[str]=13 ,__lowerCAmelCase: Union[str, Any]=30 ,__lowerCAmelCase: Optional[Any]=2 ,__lowerCAmelCase: str=3 ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: Optional[Any]=2 ,__lowerCAmelCase: Union[str, Any]=4 ,__lowerCAmelCase: Union[str, Any]=37 ,__lowerCAmelCase: List[Any]="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Dict=0.1 ,__lowerCAmelCase: Tuple=10 ,__lowerCAmelCase: str=0.02 ,__lowerCAmelCase: Any=3 ,__lowerCAmelCase: str=None ,__lowerCAmelCase: Tuple=2 ,): '''simple docstring''' _lowerCamelCase : Dict = parent _lowerCamelCase : int = batch_size _lowerCamelCase : Union[str, Any] = image_size _lowerCamelCase : List[str] = patch_size _lowerCamelCase : str = num_channels _lowerCamelCase : int = is_training _lowerCamelCase : List[str] = use_labels _lowerCamelCase : str = hidden_size _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : Optional[int] = num_attention_heads _lowerCamelCase : Dict = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_act _lowerCamelCase : Tuple = hidden_dropout_prob _lowerCamelCase : Dict = attention_probs_dropout_prob _lowerCamelCase : str = type_sequence_label_size _lowerCamelCase : int = initializer_range _lowerCamelCase : Dict = scope _lowerCamelCase : Dict = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) _lowerCamelCase : Tuple = (image_size // patch_size) ** 2 _lowerCamelCase : str = num_patches + 2 def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCamelCase : int = None if self.use_labels: _lowerCamelCase : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) _lowerCamelCase : str = self.get_config() return config, pixel_values, labels def _lowercase ( self: str ): '''simple docstring''' return DeiTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def _lowercase ( self: List[str] ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: 
Optional[int] ): '''simple docstring''' _lowerCamelCase : int = TFDeiTModel(config=__lowerCAmelCase ) _lowerCamelCase : Tuple = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self: List[str] ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : int = TFDeiTForMaskedImageModeling(config=__lowerCAmelCase ) _lowerCamelCase : str = model(__lowerCAmelCase ) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _lowerCamelCase : Any = 1 _lowerCamelCase : List[Any] = TFDeiTForMaskedImageModeling(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCamelCase : List[str] = model(__lowerCAmelCase ) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def _lowercase ( self: Dict ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Optional[Any] = self.type_sequence_label_size _lowerCamelCase : List[str] = TFDeiTForImageClassification(__lowerCAmelCase ) _lowerCamelCase : Tuple = model(__lowerCAmelCase ,labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images _lowerCamelCase : Any = 1 _lowerCamelCase : List[Any] = TFDeiTForImageClassification(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCamelCase : int = model(__lowerCAmelCase ,labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : List[Any] = self.prepare_config_and_inputs() _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = config_and_inputs _lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class A_ ( _a , _a , unittest.TestCase ): lowerCAmelCase__ = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) lowerCAmelCase__ = ( { 'feature-extraction': TFDeiTModel, 'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : str = TFDeiTModelTester(self ) _lowerCamelCase : Union[str, Any] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 ) def _lowercase ( self: List[str] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def _lowercase ( self: Optional[int] ): '''simple docstring''' pass def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) ) _lowerCamelCase : str = 
model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase ,tf.keras.layers.Dense ) ) def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : List[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Optional[Any] = [*signature.parameters.keys()] _lowerCamelCase : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] ,__lowerCAmelCase ) def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCAmelCase ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: str ,__lowerCAmelCase: str=False ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = super()._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ,return_labels=__lowerCAmelCase ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def _lowercase ( self: Optional[int] ): '''simple docstring''' for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Dict = TFDeiTModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def lowerCamelCase_( ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class A_ ( unittest.TestCase ): @cached_property def _lowercase ( self: Optional[Any] ): '''simple docstring''' return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : int = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ) _lowerCamelCase : Any = self.default_image_processor _lowerCamelCase : str = prepare_img() _lowerCamelCase : List[Any] = image_processor(images=__lowerCAmelCase ,return_tensors="tf" ) # forward pass _lowerCamelCase : List[str] = model(**__lowerCAmelCase ) # verify the logits _lowerCamelCase : Any = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape ,__lowerCAmelCase ) _lowerCamelCase : Any = tf.constant([-1.02_66, 0.19_12, -1.28_61] ) self.assertTrue(np.allclose(outputs.logits[0, :3] ,__lowerCAmelCase ,atol=1e-4 ) )
340
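The slow integration test above can be reproduced outside the harness. A hedged inference sketch (downloads the public checkpoint; needs tensorflow, Pillow, and transformers installed):

import tensorflow as tf
from PIL import Image
from transformers import DeiTImageProcessor, TFDeiTForImageClassificationWithTeacher

checkpoint = "facebook/deit-base-distilled-patch16-224"
processor = DeiTImageProcessor.from_pretrained(checkpoint)
model = TFDeiTForImageClassificationWithTeacher.from_pretrained(checkpoint)

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000): averaged cls/distillation heads
print(model.config.id2label[int(tf.argmax(logits, axis=-1)[0])])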
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class A_ ( _a ): lowerCAmelCase__ = 42 lowerCAmelCase__ = None def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=0.9_9_9 , _lowerCamelCase="cosine" , ) -> List[str]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(_lowerCamelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_lowerCamelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) _lowerCamelCase : str = [] for i in range(_lowerCamelCase ): _lowerCamelCase : Any = i / num_diffusion_timesteps _lowerCamelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) ) return torch.tensor(_lowerCamelCase , dtype=torch.floataa ) class A_ ( _a , _a ): @register_to_config def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,): '''simple docstring''' if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) _lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = 1.0 - self.betas _lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 ) _lowerCamelCase : int = torch.tensor(1.0 ) # standard deviation of the initial noise distribution _lowerCamelCase : Tuple = 1.0 # setable values _lowerCamelCase : List[Any] = None _lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() ) _lowerCamelCase : List[str] = variance_type def _lowercase ( self: Any ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ): '''simple docstring''' return sample def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, torch.device] = None ): '''simple docstring''' _lowerCamelCase : str = num_inference_steps _lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) _lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) _lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: str=None ): '''simple docstring''' if prev_timestep is None: _lowerCamelCase : List[str] = t - 1 _lowerCamelCase : Optional[int] = self.alphas_cumprod[t] _lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : List[Any] = self.betas[t] else: _lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from 
https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample _lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: _lowerCamelCase : List[str] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": _lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) ) _lowerCamelCase : str = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler _lowerCamelCase : str = variance.log() _lowerCamelCase : str = beta.log() _lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2 _lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: bool = True ,): '''simple docstring''' _lowerCamelCase : str = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": _lowerCamelCase, _lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 ) else: _lowerCamelCase : List[Any] = None # 1. compute alphas, betas if prev_timestep is None: _lowerCamelCase : List[Any] = t - 1 _lowerCamelCase : Dict = self.alphas_cumprod[t] _lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : List[str] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : Any = self.betas[t] _lowerCamelCase : str = self.alphas[t] else: _lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev _lowerCamelCase : Optional[Any] = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": _lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": _lowerCamelCase : List[Any] = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: _lowerCamelCase : Any = torch.clamp( __lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t _lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _lowerCamelCase : Union[str, Any] = 0 if t > 0: _lowerCamelCase : Dict = randn_tensor( model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device ) _lowerCamelCase : Any = self._get_variance( __lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,) if self.variance_type == "fixed_small_log": _lowerCamelCase : Optional[Any] = variance elif self.variance_type == "learned_range": _lowerCamelCase : Optional[int] = (0.5 * variance).exp() else: raise ValueError( F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" " for the UnCLIPScheduler." ) _lowerCamelCase : Dict = variance * variance_noise _lowerCamelCase : List[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.IntTensor ,): '''simple docstring''' _lowerCamelCase : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype ) _lowerCamelCase : Any = timesteps.to(original_samples.device ) _lowerCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5 _lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : int = sqrt_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5 _lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
340
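`betas_for_alpha_bar` with the default "cosine" transform simply differences the squared-cosine alpha-bar at consecutive steps and caps the result. A standalone sketch of the same schedule:

import math

import torch


def cosine_betas(num_diffusion_timesteps: int, max_beta: float = 0.999) -> torch.Tensor:
    def alpha_bar(t: float) -> float:
        # alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = [
        min(1 - alpha_bar((i + 1) / num_diffusion_timesteps) / alpha_bar(i / num_diffusion_timesteps), max_beta)
        for i in range(num_diffusion_timesteps)
    ]
    return torch.tensor(betas, dtype=torch.float32)


print(cosine_betas(1000)[:3])  # betas start small and grow along the schedule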
1
"""simple docstring""" import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : List[Any] = [ '''word_embeddings_layernorm.weight''', '''word_embeddings_layernorm.bias''', '''input_layernorm.weight''', '''input_layernorm.bias''', '''post_attention_layernorm.weight''', '''post_attention_layernorm.bias''', '''self_attention.dense.bias''', '''mlp.dense_4h_to_h.bias''', '''ln_f.weight''', '''ln_f.bias''', ] _lowerCAmelCase : str = [ '''mlp.dense_4h_to_h.weight''', '''self_attention.dense.weight''', ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Any = { "word_embeddings.weight": "word_embeddings.weight", "word_embeddings.norm.weight": "word_embeddings_layernorm.weight", "word_embeddings.norm.bias": "word_embeddings_layernorm.bias", "weight": "ln_f.weight", "bias": "ln_f.bias", } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks _lowerCamelCase : Optional[Any] = int(re.match(R".*layer_(\d*).*" , _lowerCamelCase )[1] ) layer_number -= 3 return F"""h.{layer_number}.""" + key def lowerCamelCase_( _lowerCamelCase ) -> Any: '''simple docstring''' if dtype == torch.bool: return 1 / 8 _lowerCamelCase : Dict = re.search(R"[^\d](\d+)$" , str(_lowerCamelCase ) ) if bit_search is None: raise ValueError(F"""`dtype` is not a valid dtype: {dtype}.""" ) _lowerCamelCase : Dict = int(bit_search.groups()[0] ) return bit_size // 8 def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]: '''simple docstring''' if bloom_config_file == "": _lowerCamelCase : Tuple = BloomConfig() else: _lowerCamelCase : Optional[Any] = BloomConfig.from_json_file(_lowerCamelCase ) if shard_model: _lowerCamelCase : str = os.listdir(_lowerCamelCase ) _lowerCamelCase : Dict = sorted(filter(lambda _lowerCamelCase : s.startswith("layer" ) and "model_00" in s , _lowerCamelCase ) ) _lowerCamelCase : Optional[Any] = {"weight_map": {}, "metadata": {}} _lowerCamelCase : Dict = 0 _lowerCamelCase : int = None _lowerCamelCase : List[str] = BloomConfig() for j, file in enumerate(_lowerCamelCase ): print("Processing file: {}".format(_lowerCamelCase ) ) _lowerCamelCase : Union[str, Any] = None for i in range(_lowerCamelCase ): # load all TP files _lowerCamelCase : Optional[Any] = file.replace("model_00" , F"""model_0{i}""" ) _lowerCamelCase : Any = torch.load(os.path.join(_lowerCamelCase , _lowerCamelCase ) , map_location="cpu" ) # Rename keys in the transformers names _lowerCamelCase : List[str] = list(temp.keys() ) for key in keys: _lowerCamelCase : Dict = temp.pop(_lowerCamelCase ) if tensors is None: _lowerCamelCase : List[Any] = temp else: for key in tensors.keys(): if any(key.endswith(_lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel _lowerCamelCase : Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks _lowerCamelCase : Any = torch.cat([tensors[key], temp[key]] , dim=_lowerCamelCase ) # Divide 
by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(_lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): _lowerCamelCase : Optional[Any] = tensors[key] / pretraining_tp torch.save( _lowerCamelCase , os.path.join( _lowerCamelCase , "pytorch_model_{}-of-{}.bin".format(str(j + 1 ).zfill(5 ) , str(len(_lowerCamelCase ) ).zfill(5 ) ) , ) , ) for key in tensors.keys(): _lowerCamelCase : int = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: _lowerCamelCase : Union[str, Any] = "pytorch_model_{}-of-{}.bin".format( str(j + 1 ).zfill(5 ) , str(len(_lowerCamelCase ) ).zfill(5 ) ) _lowerCamelCase : str = BloomConfig() _lowerCamelCase : Any = pytorch_dump_folder_path + "/" + CONFIG_NAME _lowerCamelCase : str = total_size with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) with open(os.path.join(_lowerCamelCase , WEIGHTS_NAME + ".index.json" ) , "w" , encoding="utf-8" ) as f: _lowerCamelCase : Optional[Any] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + "\n" f.write(_lowerCamelCase ) else: _lowerCamelCase : List[Any] = BloomModel(_lowerCamelCase ) _lowerCamelCase : List[str] = os.listdir(_lowerCamelCase ) _lowerCamelCase : List[Any] = sorted(filter(lambda _lowerCamelCase : s.startswith("layer" ) and "model_00" in s , _lowerCamelCase ) ) _lowerCamelCase : Optional[Any] = None for i, file in enumerate(_lowerCamelCase ): _lowerCamelCase : Tuple = None for i in range(_lowerCamelCase ): # load all TP files _lowerCamelCase : str = file.replace("model_00" , F"""model_0{i}""" ) _lowerCamelCase : str = torch.load(os.path.join(_lowerCamelCase , _lowerCamelCase ) , map_location="cpu" ) # Rename keys in the transformers names _lowerCamelCase : int = list(temp.keys() ) for key in keys: _lowerCamelCase : Union[str, Any] = temp.pop(_lowerCamelCase ) if tensors is None: _lowerCamelCase : Optional[int] = temp else: for key in tensors.keys(): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(_lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel _lowerCamelCase : List[str] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks _lowerCamelCase : Union[str, Any] = torch.cat([tensors[key], temp[key]] , dim=_lowerCamelCase ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(_lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): _lowerCamelCase : Dict = tensors[key] / pretraining_tp _lowerCamelCase : List[Any] = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase ) assert not other_keys.unexpected_keys, F"""The keys {other_keys.unexpected_keys} are unexpected""" if missing_keys is None: _lowerCamelCase : List[str] = set(other_keys.missing_keys ) else: _lowerCamelCase : Any = missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, F"""The keys {missing_keys} are missing""" # Save pytorch-model os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) _lowerCamelCase : Optional[Any] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME _lowerCamelCase : str = pytorch_dump_folder_path + "/" + CONFIG_NAME 
print(F"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" ) if config.torch_dtype is not None: _lowerCamelCase : Any = model.to(config.torch_dtype ) torch.save(model.state_dict() , _lowerCamelCase ) print(F"""Save configuration file to {pytorch_config_dump_path}""" ) with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _lowerCAmelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--bloom_checkpoint_path''', default=None, type=str, required=True, help='''Path to the Megatron-LM checkpoint path.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--bloom_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--shard_model''', action='''store_true''', help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''', ) parser.add_argument( '''--pretraining_tp''', default=4, type=int, help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''', ) _lowerCAmelCase : Optional[int] = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
340
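The converter merges tensor-parallel shards by two rules: parameters whose names end in WEIGHTS_TO_AVERAGE_ENDSWITH are summed then divided by `pretraining_tp`, everything else is concatenated (dim=1 when the name matches WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN, dim=0 otherwise). A toy torch sketch of both rules:

import torch

pretraining_tp = 4
shards = [torch.randn(8, 8) for _ in range(pretraining_tp)]

averaged = sum(shards) / pretraining_tp      # replicated params (layer norms, most biases)
row_parallel = torch.cat(shards, dim=1)      # (8, 32)
column_parallel = torch.cat(shards, dim=0)   # (32, 8)
print(averaged.shape, row_parallel.shape, column_parallel.shape)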
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Dict = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : str = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' for attribute in key.split("." ): _lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: _lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: _lowerCamelCase : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowerCamelCase : Tuple = value elif weight_type == "weight_g": _lowerCamelCase : List[str] = value elif weight_type == "weight_v": _lowerCamelCase : List[Any] = value elif weight_type == "bias": _lowerCamelCase : str = value elif weight_type == "running_mean": _lowerCamelCase : Optional[int] = value elif weight_type == "running_var": _lowerCamelCase : Optional[Any] = value elif weight_type == "num_batches_tracked": _lowerCamelCase : int = value elif weight_type == "inv_freq": _lowerCamelCase : List[str] = value else: _lowerCamelCase : Optional[Any] = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Dict = [] _lowerCamelCase : Optional[Any] = fairseq_model.state_dict() _lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): _lowerCamelCase : Dict = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , ) _lowerCamelCase : List[Any] = True else: for key, mapped_key in MAPPING.items(): _lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _lowerCamelCase : int = True if "*" in mapped_key: _lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2] _lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase ) if "pos_bias_u" in name: _lowerCamelCase : int = None elif "pos_bias_v" in name: _lowerCamelCase : Any = None elif "weight_g" in name: _lowerCamelCase : Any = "weight_g" elif "weight_v" in name: _lowerCamelCase : Any = "weight_v" elif "bias" in name: _lowerCamelCase : Optional[Any] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj _lowerCamelCase : Dict = "weight" elif "running_mean" in name: _lowerCamelCase : str = "running_mean" elif "inv_freq" in name: _lowerCamelCase : List[Any] = "inv_freq" elif "running_var" in name: _lowerCamelCase : Tuple = "running_var" elif "num_batches_tracked" in name: _lowerCamelCase : str = "num_batches_tracked" else: _lowerCamelCase : Dict = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : int = full_name.split("conv_layers." )[-1] _lowerCamelCase : List[Any] = name.split("." 
) _lowerCamelCase : Union[str, Any] = int(items[0] ) _lowerCamelCase : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowerCamelCase : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowerCamelCase : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) _lowerCamelCase : Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowerCamelCase : Optional[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCamelCase ) @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Dict: '''simple docstring''' if config_path is not None: _lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" ) else: _lowerCamelCase : Dict = WavaVecaConformerConfig() if "rope" in checkpoint_path: _lowerCamelCase : List[Any] = "rotary" if is_finetuned: if dict_path: _lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowerCamelCase : Optional[int] = target_dict.pad_index _lowerCamelCase : Dict = target_dict.bos_index _lowerCamelCase : Optional[Any] = target_dict.eos_index _lowerCamelCase : str = len(target_dict.symbols ) _lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" ) if not os.path.isdir(_lowerCamelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) _lowerCamelCase : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched _lowerCamelCase : List[str] = 0 _lowerCamelCase : List[Any] = 1 with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , ) _lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False _lowerCamelCase : 
Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) _lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) _lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase ) else: _lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase ) if is_finetuned: _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" ) _lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase ) _lowerCamelCase : Dict = model[0].eval() recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned ) hf_wavavec.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) _lowerCAmelCase : str = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
340
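The wildcard entries in MAPPING are resolved by splicing the layer index back into the "*" slot. A reduced sketch using a two-entry subset of the real table:

MAPPING_SUBSET = {
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
}


def rename(fairseq_name: str) -> str:
    for key, mapped_key in MAPPING_SUBSET.items():
        if key in fairseq_name:
            # e.g. "encoder.layers.3.self_attn.linear_q.weight" -> layer index "3"
            layer_index = fairseq_name.split(key)[0].split(".")[-2]
            return mapped_key.replace("*", layer_index)
    return fairseq_name


print(rename("encoder.layers.3.self_attn.linear_q.weight"))
# encoder.layers.3.self_attn.linear_q  (the weight/bias suffix is handled separately)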
1
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' if index == number_of_items: return 0 _lowerCamelCase : str = 0 _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : Tuple = knapsack(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , index + 1 ) if weights[index] <= max_weight: _lowerCamelCase : int = values[index] + knapsack( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , max_weight - weights[index] , index + 1 ) return max(_lowerCamelCase , _lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
340
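A small driver for the recursive knapsack above:

values = [60, 100, 120]
weights = [10, 20, 30]
print(knapsack(values, weights, len(values), 50, 0))  # 220: take items 1 and 2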
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(a - b ) for a, b in zip(_lowerCamelCase , _lowerCamelCase ) ) ) def lowerCamelCase_( _lowerCamelCase ) -> None: '''simple docstring''' if point: if isinstance(_lowerCamelCase , _lowerCamelCase ): for item in point: if not isinstance(_lowerCamelCase , (int, float) ): _lowerCamelCase : Dict = ( "Expected a list of numbers as input, found " F"""{type(_lowerCamelCase ).__name__}""" ) raise TypeError(_lowerCamelCase ) else: _lowerCamelCase : Optional[int] = F"""Expected a list of numbers as input, found {type(_lowerCamelCase ).__name__}""" raise TypeError(_lowerCamelCase ) else: raise ValueError("Missing an input" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(x - y ) for x, y in zip(_lowerCamelCase , _lowerCamelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
340
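Usage for the distance helpers above (names follow the restored snippet):

print(manhattan_distance([1, 1], [2, 2]))             # 2.0
print(manhattan_distance_one_liner([1, 3], [4, -1]))  # 7.0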
1
"""simple docstring""" # Imports import numpy as np class A_ : def __init__( self: List[str] ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Dict=None ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Any=None ,__lowerCAmelCase: Dict=None ): '''simple docstring''' self.set_matricies(red=__lowerCAmelCase ,green=__lowerCAmelCase ,blue=__lowerCAmelCase ,red_edge=__lowerCAmelCase ,nir=__lowerCAmelCase ) def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict=None ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[int]=None ): '''simple docstring''' if red is not None: _lowerCamelCase : List[Any] = red if green is not None: _lowerCamelCase : str = green if blue is not None: _lowerCamelCase : Dict = blue if red_edge is not None: _lowerCamelCase : Union[str, Any] = red_edge if nir is not None: _lowerCamelCase : str = nir return True def _lowercase ( self: List[str] ,__lowerCAmelCase: int="" ,__lowerCAmelCase: Dict=None ,__lowerCAmelCase: Dict=None ,__lowerCAmelCase: Any=None ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: List[str]=None ): '''simple docstring''' self.set_matricies(red=__lowerCAmelCase ,green=__lowerCAmelCase ,blue=__lowerCAmelCase ,red_edge=__lowerCAmelCase ,nir=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = { "ARVI2": self.arvaa, "CCCI": self.ccci, "CVI": self.cvi, "GLI": self.gli, "NDVI": self.ndvi, "BNDVI": self.bndvi, "redEdgeNDVI": self.red_edge_ndvi, "GNDVI": self.gndvi, "GBNDVI": self.gbndvi, "GRNDVI": self.grndvi, "RBNDVI": self.rbndvi, "PNDVI": self.pndvi, "ATSAVI": self.atsavi, "BWDRVI": self.bwdrvi, "CIgreen": self.ci_green, "CIrededge": self.ci_rededge, "CI": self.ci, "CTVI": self.ctvi, "GDVI": self.gdvi, "EVI": self.evi, "GEMI": self.gemi, "GOSAVI": self.gosavi, "GSAVI": self.gsavi, "Hue": self.hue, "IVI": self.ivi, "IPVI": self.ipvi, "I": self.i, "RVI": self.rvi, "MRVI": self.mrvi, "MSAVI": self.m_savi, "NormG": self.norm_g, "NormNIR": self.norm_nir, "NormR": self.norm_r, "NGRDI": self.ngrdi, "RI": self.ri, "S": self.s, "IF": self._if, "DVI": self.dvi, "TVI": self.tvi, "NDRE": self.ndre, } try: return funcs[index]() except KeyError: print("Index not in the list!" 
) return False def _lowercase ( self: Optional[Any] ): '''simple docstring''' return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) def _lowercase ( self: List[str] ): '''simple docstring''' return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def _lowercase ( self: int ): '''simple docstring''' return self.nir * (self.red / (self.green**2)) def _lowercase ( self: List[str] ): '''simple docstring''' return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' return (self.nir - self.red) / (self.nir + self.red) def _lowercase ( self: int ): '''simple docstring''' return (self.nir - self.blue) / (self.nir + self.blue) def _lowercase ( self: Optional[Any] ): '''simple docstring''' return (self.redEdge - self.red) / (self.redEdge + self.red) def _lowercase ( self: Dict ): '''simple docstring''' return (self.nir - self.green) / (self.nir + self.green) def _lowercase ( self: List[str] ): '''simple docstring''' return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def _lowercase ( self: Tuple ): '''simple docstring''' return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def _lowercase ( self: Dict ): '''simple docstring''' return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def _lowercase ( self: str ): '''simple docstring''' return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: int=0.08 ,__lowerCAmelCase: Tuple=1.22 ,__lowerCAmelCase: str=0.03 ): '''simple docstring''' return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def _lowercase ( self: Optional[int] ): '''simple docstring''' return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def _lowercase ( self: Dict ): '''simple docstring''' return (self.nir / self.green) - 1 def _lowercase ( self: Optional[Any] ): '''simple docstring''' return (self.nir / self.redEdge) - 1 def _lowercase ( self: Tuple ): '''simple docstring''' return (self.red - self.blue) / self.red def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : Any = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def _lowercase ( self: List[str] ): '''simple docstring''' return self.nir - self.green def _lowercase ( self: Union[str, Any] ): '''simple docstring''' return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : List[str] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red) def _lowercase ( self: Tuple ,__lowerCAmelCase: List[str]=0.16 ): '''simple docstring''' return (self.nir - self.green) / (self.nir + self.green + y) def _lowercase ( self: List[str] ,__lowerCAmelCase: List[Any]=0.5 ): '''simple docstring''' return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' return np.arctan( ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) ) def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ): '''simple docstring''' 
return (self.nir - b) / (a * self.red) def _lowercase ( self: Dict ): '''simple docstring''' return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def _lowercase ( self: Any ): '''simple docstring''' return (self.red + self.green + self.blue) / 30.5 def _lowercase ( self: List[Any] ): '''simple docstring''' return self.nir / self.red def _lowercase ( self: Any ): '''simple docstring''' return (self.rvi() - 1) / (self.rvi() + 1) def _lowercase ( self: Dict ): '''simple docstring''' return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def _lowercase ( self: int ): '''simple docstring''' return self.green / (self.nir + self.red + self.green) def _lowercase ( self: int ): '''simple docstring''' return self.nir / (self.nir + self.red + self.green) def _lowercase ( self: str ): '''simple docstring''' return self.red / (self.nir + self.red + self.green) def _lowercase ( self: Optional[int] ): '''simple docstring''' return (self.green - self.red) / (self.green + self.red) def _lowercase ( self: str ): '''simple docstring''' return (self.red - self.green) / (self.red + self.green) def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase : str = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) _lowerCamelCase : str = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def _lowercase ( self: Any ): '''simple docstring''' return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' return self.nir / self.red def _lowercase ( self: Optional[Any] ): '''simple docstring''' return (self.ndvi() + 0.5) ** (1 / 2) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' return (self.nir - self.redEdge) / (self.nir + self.redEdge)
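# The methods above implement standard spectral vegetation indices (NDVI, EVI,
# SAVI, hue and related band ratios) over per-band reflectance arrays held on
# the instance (self.red, self.green, self.blue, self.nir, self.redEdge).
# A minimal standalone sketch of the core NDVI formula with NumPy; the band
# values below are illustrative, not taken from the class:
import numpy as np

red = np.array([0.20, 0.30, 0.25])
nir = np.array([0.60, 0.70, 0.65])
ndvi = (nir - red) / (nir + red)  # ranges from -1 (water) to +1 (dense vegetation)
print(ndvi)  # [0.5 0.4 0.44444444]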
340
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Optional[Any] = np.inf def set_batch_size(_lowerCamelCase ) -> None: nonlocal batch_size if isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Optional[int] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ) and feature.dtype == "binary": _lowerCamelCase : List[str] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_lowerCamelCase , _lowerCamelCase ) return None if batch_size is np.inf else batch_size class A_ ( _a ): def __init__( self: Optional[int] ,__lowerCAmelCase: NestedDataStructureLike[PathLike] ,__lowerCAmelCase: Optional[NamedSplit] = None ,__lowerCAmelCase: Optional[Features] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: int ,): '''simple docstring''' super().__init__( __lowerCAmelCase ,split=__lowerCAmelCase ,features=__lowerCAmelCase ,cache_dir=__lowerCAmelCase ,keep_in_memory=__lowerCAmelCase ,streaming=__lowerCAmelCase ,num_proc=__lowerCAmelCase ,**__lowerCAmelCase ,) _lowerCamelCase : Tuple = path_or_paths if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else {self.split: path_or_paths} _lowerCamelCase : Any = _PACKAGED_DATASETS_MODULES["parquet"][1] _lowerCamelCase : int = Parquet( cache_dir=__lowerCAmelCase ,data_files=__lowerCAmelCase ,features=__lowerCAmelCase ,hash=__lowerCAmelCase ,**__lowerCAmelCase ,) def _lowercase ( self: Optional[int] ): '''simple docstring''' if self.streaming: _lowerCamelCase : List[Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _lowerCamelCase : Tuple = None _lowerCamelCase : Optional[int] = None _lowerCamelCase : List[str] = None _lowerCamelCase : str = None self.builder.download_and_prepare( download_config=__lowerCAmelCase ,download_mode=__lowerCAmelCase ,verification_mode=__lowerCAmelCase ,base_path=__lowerCAmelCase ,num_proc=self.num_proc ,) _lowerCamelCase : Any = self.builder.as_dataset( split=self.split ,verification_mode=__lowerCAmelCase ,in_memory=self.keep_in_memory ) return dataset class A_ : def __init__( self: str ,__lowerCAmelCase: Dataset ,__lowerCAmelCase: Union[PathLike, BinaryIO] ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: List[Any] ,): '''simple docstring''' _lowerCamelCase : Any = dataset _lowerCamelCase : Any = path_or_buf _lowerCamelCase : Any = batch_size or get_writer_batch_size(dataset.features ) _lowerCamelCase : List[str] = parquet_writer_kwargs def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf ,(str, 
bytes, os.PathLike) ): with open(self.path_or_buf ,"wb+" ) as buffer: _lowerCamelCase : str = self._write(file_obj=__lowerCAmelCase ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) else: _lowerCamelCase : Optional[int] = self._write(file_obj=self.path_or_buf ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) return written def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: BinaryIO ,__lowerCAmelCase: int ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : List[str] = 0 _lowerCamelCase : Optional[int] = parquet_writer_kwargs.pop("path_or_buf" ,__lowerCAmelCase ) _lowerCamelCase : List[str] = self.dataset.features.arrow_schema _lowerCamelCase : str = pq.ParquetWriter(__lowerCAmelCase ,schema=__lowerCAmelCase ,**__lowerCAmelCase ) for offset in logging.tqdm( range(0 ,len(self.dataset ) ,__lowerCAmelCase ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating parquet from Arrow format" ,): _lowerCamelCase : List[str] = query_table( table=self.dataset._data ,key=slice(__lowerCAmelCase ,offset + batch_size ) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,) writer.write_table(__lowerCAmelCase ) written += batch.nbytes writer.close() return written
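# The writer class above streams a datasets.Dataset to Parquet in
# row-group-sized batches via pyarrow. A standalone sketch of the same
# batched-write pattern (table contents and file name are illustrative):
import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"ids": [1, 2, 3, 4], "text": ["a", "b", "c", "d"]})
writer = pq.ParquetWriter("example.parquet", schema=table.schema)
for offset in range(0, table.num_rows, 2):  # one write per 2-row batch
    writer.write_table(table.slice(offset, 2))
writer.close()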
340
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : List[Any] = {'''configuration_sew''': ['''SEW_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SEWConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Tuple = [ '''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SEWForCTC''', '''SEWForSequenceClassification''', '''SEWModel''', '''SEWPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys _lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Optional[int] = {} _lowerCamelCase : Optional[int] = tokenizer(example["content"] , truncation=_lowerCamelCase )["input_ids"] _lowerCamelCase : Dict = len(example["content"] ) / len(output["input_ids"] ) return output _lowerCAmelCase : Tuple = HfArgumentParser(PretokenizationArguments) _lowerCAmelCase : Optional[int] = parser.parse_args() if args.num_workers is None: _lowerCAmelCase : Any = multiprocessing.cpu_count() _lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir) _lowerCAmelCase : Union[str, Any] = time.time() _lowerCAmelCase : Optional[int] = load_dataset(args.dataset_name, split='''train''') print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') _lowerCAmelCase : Any = time.time() _lowerCAmelCase : Dict = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ '''repo_name''', '''path''', '''copies''', '''size''', '''content''', '''license''', '''hash''', '''line_mean''', '''line_max''', '''alpha_frac''', '''autogenerated''', ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') _lowerCAmelCase : str = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
340
1
"""simple docstring""" import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() _lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) _lowerCAmelCase : Union[str, Any] = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS} def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" ) if tokenizer_name is None: _lowerCamelCase : Union[str, Any] = TOKENIZER_CLASSES else: _lowerCamelCase : str = {tokenizer_name: getattr(_lowerCamelCase , tokenizer_name + "Fast" )} logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" ) for tokenizer_name in tokenizer_names: _lowerCamelCase : Dict = TOKENIZER_CLASSES[tokenizer_name] _lowerCamelCase : int = True if checkpoint_name is None: _lowerCamelCase : Any = list(tokenizer_class.max_model_input_sizes.keys() ) else: _lowerCamelCase : Optional[Any] = [checkpoint_name] logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" ) for checkpoint in checkpoint_names: logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" ) # Load tokenizer _lowerCamelCase : Dict = tokenizer_class.from_pretrained(_lowerCamelCase , force_download=_lowerCamelCase ) # Save fast tokenizer logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" ) # For organization names we create sub-directories if "/" in checkpoint: _lowerCamelCase, _lowerCamelCase : Optional[int] = checkpoint.split("/" ) _lowerCamelCase : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase ) elif add_prefix: _lowerCamelCase : Dict = checkpoint _lowerCamelCase : Optional[Any] = dump_path else: _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Optional[int] = dump_path logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: _lowerCamelCase : Any = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] _lowerCamelCase : Tuple = file_path.split(_lowerCamelCase )[-1][0] if next_char == "/": _lowerCamelCase : Any = os.path.join(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : int = None logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) _lowerCamelCase : int = tokenizer.save_pretrained( _lowerCamelCase , legacy_format=_lowerCamelCase , filename_prefix=_lowerCamelCase ) logger.info(F"""=> File names {file_names}""" ) for file_name in file_names: if not file_name.endswith("tokenizer.json" ): os.remove(_lowerCamelCase ) logger.info(F"""=> removing {file_name}""" ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.''' ) parser.add_argument( '''--tokenizer_name''', default=None, type=str, help=( f'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. 
If not given, will ''' '''download and convert all the checkpoints from AWS.''' ), ) parser.add_argument( '''--checkpoint_name''', default=None, type=str, help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''', ) parser.add_argument( '''--force_download''', action='''store_true''', help='''Re-download checkpoints.''', ) _lowerCAmelCase : Any = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
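# Example invocation of the conversion script above (the script path and
# checkpoint name are illustrative):
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers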
340
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Optional[Any] = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
1
"""simple docstring""" import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py _lowerCAmelCase : List[str] = '''src/transformers''' _lowerCAmelCase : Tuple = '''docs/source/en/tasks''' def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' with open(_lowerCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f: _lowerCamelCase : List[Any] = f.readlines() # Find the start prompt. _lowerCamelCase : Optional[Any] = 0 while not lines[start_index].startswith(_lowerCamelCase ): start_index += 1 start_index += 1 _lowerCamelCase : Optional[Any] = start_index while not lines[end_index].startswith(_lowerCamelCase ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. _lowerCAmelCase : List[str] = direct_transformers_import(TRANSFORMERS_PATH) _lowerCAmelCase : int = { '''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, '''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, '''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, '''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, '''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, '''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, '''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, '''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, '''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, '''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, '''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, '''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, '''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, '''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
_lowerCAmelCase : Union[str, Any] = { '''summarization.md''': ('''nllb''',), '''translation.md''': ('''nllb''',), } def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : int = TASK_GUIDE_TO_MODELS[task_guide] _lowerCamelCase : Tuple = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_lowerCamelCase , set() ) _lowerCamelCase : str = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> Dict: '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = _find_text_in_file( filename=os.path.join(_lowerCamelCase , _lowerCamelCase ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , ) _lowerCamelCase : List[Any] = get_model_list_for_task(_lowerCamelCase ) if current_list != new_list: if overwrite: with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`""" " to fix this." ) if __name__ == "__main__": _lowerCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') _lowerCAmelCase : str = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
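# The heart of the checker above is locating the auto-generated block between
# two marker comments and splicing in a freshly generated list. A standalone
# sketch of that find-and-splice step (markers and content are illustrative):
lines = ["intro\n", "<!--start-->\n", "old list\n", "<!--end-->\n", "outro\n"]
start = next(i for i, line in enumerate(lines) if line.startswith("<!--start-->")) + 1
end = next(i for i, line in enumerate(lines) if line.startswith("<!--end-->"))
updated = lines[:start] + ["new list\n"] + lines[end:]
print("".join(updated))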
340
"""simple docstring""" import logging from transformers.configuration_utils import PretrainedConfig _lowerCAmelCase : Optional[Any] = logging.getLogger(__name__) class A_ ( _a ): lowerCAmelCase__ = 'masked_bert' def __init__( self: Union[str, Any] ,__lowerCAmelCase: Dict=30_522 ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Dict=12 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: List[Any]="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Tuple=512 ,__lowerCAmelCase: str=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Union[str, Any]=1e-12 ,__lowerCAmelCase: Union[str, Any]=0 ,__lowerCAmelCase: List[Any]="topK" ,__lowerCAmelCase: Optional[Any]="constant" ,__lowerCAmelCase: Optional[Any]=0.0 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = hidden_size _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : Tuple = num_attention_heads _lowerCamelCase : Optional[Any] = hidden_act _lowerCamelCase : Optional[Any] = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCamelCase : str = max_position_embeddings _lowerCamelCase : List[str] = type_vocab_size _lowerCamelCase : Optional[int] = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps _lowerCamelCase : int = pruning_method _lowerCamelCase : str = mask_init _lowerCamelCase : List[Any] = mask_scale
340
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[str] = { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''', '''umberto-commoncrawl-cased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json''' ), '''umberto-wikipedia-uncased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json''' ), } class A_ ( _a ): lowerCAmelCase__ = 'camembert' def __init__( self: Tuple ,__lowerCAmelCase: Union[str, Any]=30_522 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Union[str, Any]=12 ,__lowerCAmelCase: int=12 ,__lowerCAmelCase: Optional[int]=3_072 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: int=512 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Any=1 ,__lowerCAmelCase: Any=0 ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Any="absolute" ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Tuple=None ,**__lowerCAmelCase: Dict ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[str] = vocab_size _lowerCamelCase : Any = hidden_size _lowerCamelCase : Union[str, Any] = num_hidden_layers _lowerCamelCase : str = num_attention_heads _lowerCamelCase : List[Any] = hidden_act _lowerCamelCase : int = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = max_position_embeddings _lowerCamelCase : str = type_vocab_size _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Union[str, Any] = layer_norm_eps _lowerCamelCase : Tuple = position_embedding_type _lowerCamelCase : List[Any] = use_cache _lowerCamelCase : Dict = classifier_dropout class A_ ( _a ): @property def _lowercase ( self: Any ): '''simple docstring''' if self.task == "multiple-choice": _lowerCamelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"} else: _lowerCamelCase : int = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
340
"""simple docstring""" import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model _lowerCAmelCase : str = '''0.12''' # assumed parallelism: 8 if is_torch_available(): import torch def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ) -> List[Any]: '''simple docstring''' if rng is None: _lowerCamelCase : Union[str, Any] = random.Random() _lowerCamelCase : Union[str, Any] = 1 for dim in shape: total_dims *= dim _lowerCamelCase : Optional[int] = [] for _ in range(_lowerCamelCase ): values.append(rng.randint(0 , vocab_size - 1 ) ) _lowerCamelCase : Union[str, Any] = np.array(_lowerCamelCase , dtype=jnp.intaa ).reshape(_lowerCamelCase ) return output def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : Optional[int] = ids_tensor(_lowerCamelCase , vocab_size=2 , rng=_lowerCamelCase ) # make sure that at least one token is attended to for each batch _lowerCamelCase : List[str] = 1 return attn_mask @require_flax class A_ : lowerCAmelCase__ = None lowerCAmelCase__ = () def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 _lowerCamelCase : List[str] = 2 _lowerCamelCase : str = inputs["input_ids"].shape[-1] // 2 _lowerCamelCase : Tuple = inputs["input_ids"][:max_batch_size, :sequence_length] _lowerCamelCase : Any = jnp.ones_like(__lowerCAmelCase ) _lowerCamelCase : List[Any] = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens _lowerCamelCase : Optional[Any] = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` _lowerCamelCase : List[str] = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = False _lowerCamelCase : Dict = max_length _lowerCamelCase : Tuple = 0 for model_class in self.all_generative_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning _lowerCamelCase : Any = getattr(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Dict = pt_model_class(__lowerCAmelCase ).eval() _lowerCamelCase : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCAmelCase ,flax_model.params ) _lowerCamelCase : int = flax_model.generate(__lowerCAmelCase ).sequences _lowerCamelCase : Optional[int] = pt_model.generate(torch.tensor(__lowerCAmelCase ,dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: _lowerCamelCase : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, 
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = self._get_input_ids_and_config() _lowerCamelCase : Union[str, Any] = False _lowerCamelCase : Union[str, Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = True _lowerCamelCase : Optional[int] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : int = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : int = False _lowerCamelCase : Optional[Any] = max_length _lowerCamelCase : Dict = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[str] = model_class(__lowerCAmelCase ) _lowerCamelCase : Dict = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = self._get_input_ids_and_config() _lowerCamelCase : Tuple = False _lowerCamelCase : Union[str, Any] = max_length _lowerCamelCase : List[str] = 2 _lowerCamelCase : Optional[int] = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : str = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() _lowerCamelCase : int = True _lowerCamelCase : List[Any] = max_length _lowerCamelCase : Optional[Any] = 0.8 _lowerCamelCase : Union[str, Any] = 10 _lowerCamelCase : List[str] = 0.3 _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : str = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Any = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : int = jit(model.generate ) _lowerCamelCase : Optional[int] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() 
,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[str] = max_length _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : Dict = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Any = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : Any = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() _lowerCamelCase : Dict = max_length _lowerCamelCase : List[Any] = 2 _lowerCamelCase : Tuple = 1 _lowerCamelCase : List[str] = 8 _lowerCamelCase : List[Any] = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : int = model_class(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : Optional[Any] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Tuple = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : Dict = False _lowerCamelCase : Any = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[Any] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : List[str] = 
attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : int = 2 _lowerCamelCase : int = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : Dict = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) @require_flax class A_ ( unittest.TestCase ): def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" ) _lowerCamelCase : Union[str, Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) _lowerCamelCase : Optional[Any] = "Hello world" _lowerCamelCase : str = tokenizer(__lowerCAmelCase ,return_tensors="np" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__lowerCAmelCase ,"do_samples" ): model.generate(__lowerCAmelCase ,do_samples=__lowerCAmelCase ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__lowerCAmelCase ,"foo" ): _lowerCamelCase : List[str] = {"foo": "bar"} model.generate(__lowerCAmelCase ,**__lowerCAmelCase )
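# Each test above runs generate() eagerly and under jax.jit, then asserts the
# produced token sequences match. A standalone sketch of that eager-vs-jitted
# comparison with a toy function (the function itself is illustrative):
import jax
import jax.numpy as jnp


def step(x):
    return jnp.cumsum(x, axis=-1)


x = jnp.arange(6).reshape(2, 3)
assert step(x).tolist() == jax.jit(step)(x).tolist()  # same result both ways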
340
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class A_ ( unittest.TestCase ): def _lowercase ( self: int ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Dict = 1 _lowerCamelCase : Tuple = 3 _lowerCamelCase : Union[str, Any] = (32, 32) _lowerCamelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(__lowerCAmelCase ) return image @property def _lowercase ( self: Any ): '''simple docstring''' torch.manual_seed(0 ) _lowerCamelCase : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=7 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,attention_head_dim=8 ,use_linear_projection=__lowerCAmelCase ,only_cross_attention=(True, True, False) ,num_class_embeds=100 ,) return model @property def _lowercase ( self: Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) _lowerCamelCase : Optional[int] = AutoencoderKL( block_out_channels=[32, 32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,) return model @property def _lowercase ( self: Tuple ): '''simple docstring''' torch.manual_seed(0 ) _lowerCamelCase : Optional[int] = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,hidden_act="gelu" ,projection_dim=512 ,) return CLIPTextModel(__lowerCAmelCase ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator _lowerCamelCase : List[str] = self.dummy_cond_unet_upscale _lowerCamelCase : Dict = DDPMScheduler() _lowerCamelCase : Optional[int] = DDIMScheduler(prediction_type="v_prediction" ) _lowerCamelCase : List[Any] = self.dummy_vae _lowerCamelCase : List[Any] = self.dummy_text_encoder _lowerCamelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _lowerCamelCase : Tuple = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0] _lowerCamelCase : Dict = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCamelCase : Any = StableDiffusionUpscalePipeline( unet=__lowerCAmelCase ,low_res_scheduler=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,vae=__lowerCAmelCase ,text_encoder=__lowerCAmelCase ,tokenizer=__lowerCAmelCase ,max_noise_level=350 ,) _lowerCamelCase : int = sd_pipe.to(__lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : str = "A painting of a squirrel eating a burger" _lowerCamelCase : Union[str, Any] = 
torch.Generator(device=__lowerCAmelCase ).manual_seed(0 ) _lowerCamelCase : Union[str, Any] = sd_pipe( [prompt] ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="np" ,) _lowerCamelCase : Any = output.images _lowerCamelCase : List[Any] = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 ) _lowerCamelCase : str = sd_pipe( [prompt] ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="np" ,return_dict=__lowerCAmelCase ,)[0] _lowerCamelCase : str = image[0, -3:, -3:, -1] _lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1] _lowerCamelCase : Dict = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) _lowerCamelCase : Union[str, Any] = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : int = "cpu" # ensure determinism for the device-dependent torch.Generator _lowerCamelCase : Union[str, Any] = self.dummy_cond_unet_upscale _lowerCamelCase : Any = DDPMScheduler() _lowerCamelCase : List[str] = DDIMScheduler(prediction_type="v_prediction" ) _lowerCamelCase : List[Any] = self.dummy_vae _lowerCamelCase : int = self.dummy_text_encoder _lowerCamelCase : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _lowerCamelCase : List[Any] = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0] _lowerCamelCase : List[Any] = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCamelCase : str = StableDiffusionUpscalePipeline( unet=__lowerCAmelCase ,low_res_scheduler=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,vae=__lowerCAmelCase ,text_encoder=__lowerCAmelCase ,tokenizer=__lowerCAmelCase ,max_noise_level=350 ,) _lowerCamelCase : List[str] = sd_pipe.to(__lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Tuple = "A painting of a squirrel eating a burger" _lowerCamelCase : str = sd_pipe( 2 * [prompt] ,image=2 * [low_res_image] ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="np" ,) _lowerCamelCase : Dict = output.images assert image.shape[0] == 2 _lowerCamelCase : List[Any] = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 ) _lowerCamelCase : Any = sd_pipe( [prompt] ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,num_images_per_prompt=2 ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="np" ,) _lowerCamelCase : List[Any] = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != "cuda" ,"This test requires a GPU" ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = self.dummy_cond_unet_upscale _lowerCamelCase : Tuple = DDPMScheduler() _lowerCamelCase : Optional[Any] = DDIMScheduler(prediction_type="v_prediction" ) _lowerCamelCase : int = self.dummy_vae _lowerCamelCase : Any = self.dummy_text_encoder _lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _lowerCamelCase : Union[str, Any] = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0] _lowerCamelCase : List[Any] = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("RGB" ).resize((64, 64) ) # 
put models in fp16, except vae as it overflows in fp16 _lowerCamelCase : Dict = unet.half() _lowerCamelCase : Tuple = text_encoder.half() # make sure here that pndm scheduler skips prk _lowerCamelCase : Optional[Any] = StableDiffusionUpscalePipeline( unet=__lowerCAmelCase ,low_res_scheduler=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,vae=__lowerCAmelCase ,text_encoder=__lowerCAmelCase ,tokenizer=__lowerCAmelCase ,max_noise_level=350 ,) _lowerCamelCase : str = sd_pipe.to(__lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = "A painting of a squirrel eating a burger" _lowerCamelCase : List[str] = torch.manual_seed(0 ) _lowerCamelCase : List[Any] = sd_pipe( [prompt] ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,num_inference_steps=2 ,output_type="np" ,).images _lowerCamelCase : str = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class A_ ( unittest.TestCase ): def _lowercase ( self: Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) _lowerCamelCase : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat.npy" ) _lowerCamelCase : List[str] = "stabilityai/stable-diffusion-x4-upscaler" _lowerCamelCase : Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(__lowerCAmelCase ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) pipe.enable_attention_slicing() _lowerCamelCase : Dict = "a cat sitting on a park bench" _lowerCamelCase : Any = torch.manual_seed(0 ) _lowerCamelCase : Dict = pipe( prompt=__lowerCAmelCase ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,output_type="np" ,) _lowerCamelCase : List[Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1e-3 def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) _lowerCamelCase : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat_fp16.npy" ) _lowerCamelCase : str = "stabilityai/stable-diffusion-x4-upscaler" _lowerCamelCase : int = StableDiffusionUpscalePipeline.from_pretrained( __lowerCAmelCase ,torch_dtype=torch.floataa ,) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) pipe.enable_attention_slicing() _lowerCamelCase : List[Any] = "a cat sitting on a park bench" _lowerCamelCase : Any = torch.manual_seed(0 ) _lowerCamelCase : Optional[Any] = pipe( prompt=__lowerCAmelCase ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,output_type="np" ,) _lowerCamelCase : Optional[int] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5e-1 def _lowercase ( self: Any ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _lowerCamelCase : List[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) 
_lowerCamelCase : List[str] = "stabilityai/stable-diffusion-x4-upscaler" _lowerCamelCase : Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained( __lowerCAmelCase ,torch_dtype=torch.floataa ,) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _lowerCamelCase : List[str] = "a cat sitting on a park bench" _lowerCamelCase : List[str] = torch.manual_seed(0 ) _lowerCamelCase : str = pipe( prompt=__lowerCAmelCase ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,num_inference_steps=5 ,output_type="np" ,) _lowerCamelCase : int = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
340
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : int = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class A_ ( _a ): lowerCAmelCase__ = 'mobilenet_v1' def __init__( self: Tuple ,__lowerCAmelCase: int=3 ,__lowerCAmelCase: Dict=224 ,__lowerCAmelCase: int=1.0 ,__lowerCAmelCase: Tuple=8 ,__lowerCAmelCase: List[str]="relu6" ,__lowerCAmelCase: int=True ,__lowerCAmelCase: List[Any]=0.9_99 ,__lowerCAmelCase: Optional[int]=0.02 ,__lowerCAmelCase: Optional[int]=0.0_01 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(**__lowerCAmelCase ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) _lowerCamelCase : List[str] = num_channels _lowerCamelCase : Union[str, Any] = image_size _lowerCamelCase : List[Any] = depth_multiplier _lowerCamelCase : Any = min_depth _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Dict = tf_padding _lowerCamelCase : Union[str, Any] = classifier_dropout_prob _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps class A_ ( _a ): lowerCAmelCase__ = version.parse('1.11' ) @property def _lowercase ( self: Optional[int] ): '''simple docstring''' return OrderedDict([("pixel_values", {0: "batch"})] ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def _lowercase ( self: Any ): '''simple docstring''' return 1e-4
340
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: _lowerCAmelCase : Any = None _lowerCAmelCase : int = logging.get_logger(__name__) _lowerCAmelCase : int = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} _lowerCAmelCase : int = { '''vocab_file''': { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''', }, } _lowerCAmelCase : Tuple = { '''xlnet-base-cased''': None, '''xlnet-large-cased''': None, } _lowerCAmelCase : Optional[int] = '''▁''' # Segments (not really needed) _lowerCAmelCase : Union[str, Any] = 0 _lowerCAmelCase : Tuple = 1 _lowerCAmelCase : List[str] = 2 _lowerCAmelCase : Union[str, Any] = 3 _lowerCAmelCase : Optional[int] = 4 class A_ ( _a ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = 'left' lowerCAmelCase__ = XLNetTokenizer def __init__( self: Tuple ,__lowerCAmelCase: Dict=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[int]=False ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: List[str]=False ,__lowerCAmelCase: Optional[Any]="<s>" ,__lowerCAmelCase: Optional[Any]="</s>" ,__lowerCAmelCase: Union[str, Any]="<unk>" ,__lowerCAmelCase: Tuple="<sep>" ,__lowerCAmelCase: Dict="<pad>" ,__lowerCAmelCase: Union[str, Any]="<cls>" ,__lowerCAmelCase: Optional[int]="<mask>" ,__lowerCAmelCase: Tuple=["<eop>", "<eod>"] ,**__lowerCAmelCase: Optional[int] ,): '''simple docstring''' _lowerCamelCase : Dict = AddedToken(__lowerCAmelCase ,lstrip=__lowerCAmelCase ,rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else mask_token super().__init__( vocab_file=__lowerCAmelCase ,tokenizer_file=__lowerCAmelCase ,do_lower_case=__lowerCAmelCase ,remove_space=__lowerCAmelCase ,keep_accents=__lowerCAmelCase ,bos_token=__lowerCAmelCase ,eos_token=__lowerCAmelCase ,unk_token=__lowerCAmelCase ,sep_token=__lowerCAmelCase ,pad_token=__lowerCAmelCase ,cls_token=__lowerCAmelCase ,mask_token=__lowerCAmelCase ,additional_special_tokens=__lowerCAmelCase ,**__lowerCAmelCase ,) _lowerCamelCase : Dict = 3 _lowerCamelCase : int = do_lower_case _lowerCamelCase : int = remove_space _lowerCamelCase : List[Any] = keep_accents _lowerCamelCase : Dict = vocab_file _lowerCamelCase : Dict = False if not self.vocab_file else True def _lowercase ( self: Dict ,__lowerCAmelCase: List[int] ,__lowerCAmelCase: Optional[List[int]] = None ): '''simple docstring''' _lowerCamelCase : Optional[Any] = [self.sep_token_id] _lowerCamelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowercase ( self: int ,__lowerCAmelCase: List[int] ,__lowerCAmelCase: Optional[List[int]] = None ): '''simple docstring''' _lowerCamelCase : Tuple = [self.sep_token_id] _lowerCamelCase : List[str] = 
[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__lowerCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _lowerCamelCase : Union[str, Any] = os.path.join( __lowerCAmelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ): copyfile(self.vocab_file ,__lowerCAmelCase ) return (out_vocab_file,)
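# XLNet appends <sep> and <cls> at the end of the sequence rather than the
# start, which is exactly what the two methods above encode. A standalone
# illustration of the layouts (token ids are illustrative):
sep_id, cls_id = 4, 3
tokens_a, tokens_b = [10, 11], [20, 21]
single = tokens_a + [sep_id] + [cls_id]                      # A <sep> <cls>
pair = tokens_a + [sep_id] + tokens_b + [sep_id] + [cls_id]  # A <sep> B <sep> <cls>
print(single, pair)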
340
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def lowerCamelCase_( _lowerCamelCase ) -> Any: '''simple docstring''' for param in module.parameters(): _lowerCamelCase : Optional[int] = False def lowerCamelCase_( ) -> List[str]: '''simple docstring''' _lowerCamelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _lowerCamelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Dict = plt.imshow(_lowerCamelCase ) fig.axes.get_xaxis().set_visible(_lowerCamelCase ) fig.axes.get_yaxis().set_visible(_lowerCamelCase ) plt.show() def lowerCamelCase_( ) -> str: '''simple docstring''' _lowerCamelCase : Tuple = datetime.now() _lowerCamelCase : Tuple = current_time.strftime("%H:%M:%S" ) return timestamp
340
1
"""simple docstring""" import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import SeqaSeqDataset, pickle_save def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1024 , _lowerCamelCase=1024 , _lowerCamelCase=False , **_lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : str = AutoTokenizer.from_pretrained(_lowerCamelCase ) _lowerCamelCase : Optional[int] = SeqaSeqDataset(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , type_path="train" , **_lowerCamelCase ) _lowerCamelCase : Tuple = tok.pad_token_id def get_lens(_lowerCamelCase ): _lowerCamelCase : Tuple = tqdm( DataLoader(_lowerCamelCase , batch_size=512 , num_workers=8 , shuffle=_lowerCamelCase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , ) _lowerCamelCase : int = [] for batch in dl: _lowerCamelCase : List[str] = batch["input_ids"].ne(_lowerCamelCase ).sum(1 ).tolist() _lowerCamelCase : Tuple = batch["labels"].ne(_lowerCamelCase ).sum(1 ).tolist() if consider_target: for src, tgt in zip(_lowerCamelCase , _lowerCamelCase ): max_lens.append(max(_lowerCamelCase , _lowerCamelCase ) ) else: max_lens.extend(_lowerCamelCase ) return max_lens _lowerCamelCase : Optional[Any] = get_lens(_lowerCamelCase ) _lowerCamelCase : Tuple = SeqaSeqDataset(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , type_path="val" , **_lowerCamelCase ) _lowerCamelCase : Optional[Any] = get_lens(_lowerCamelCase ) pickle_save(_lowerCamelCase , train_ds.len_file ) pickle_save(_lowerCamelCase , val_ds.len_file ) if __name__ == "__main__": fire.Fire(save_len_file)
340
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = ( first_str_length if first_str_length > second_str_length else second_str_length ) _lowerCamelCase : list = [] for char_count in range(_lowerCamelCase ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(_lowerCamelCase ) if __name__ == "__main__": print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
340
1
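A self-contained restatement of the interleaving routine from the record above, usable as a quick sanity check (the function name is illustrative):

def alternate(a: str, b: str) -> str:
    out = []
    for i in range(max(len(a), len(b))):
        if i < len(a):
            out.append(a[i])
        if i < len(b):
            out.append(b[i])
    return "".join(out)

assert alternate("AB", "XYZ") == "AXBYZ"
assert alternate("", "abc") == "abc"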
"""simple docstring""" import importlib import torch import yaml from omegaconf import OmegaConf from taming.models.vqgan import VQModel def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]: '''simple docstring''' _lowerCamelCase : Union[str, Any] = OmegaConf.load(_lowerCamelCase ) if display: print(yaml.dump(OmegaConf.to_container(_lowerCamelCase ) ) ) return config def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ) -> Union[str, Any]: '''simple docstring''' if conf_path is None: _lowerCamelCase : int = "./model_checkpoints/vqgan_only.yaml" _lowerCamelCase : Optional[Any] = load_config(_lowerCamelCase , display=_lowerCamelCase ) _lowerCamelCase : List[Any] = VQModel(**config.model.params ) if ckpt_path is None: _lowerCamelCase : str = "./model_checkpoints/vqgan_only.pt" _lowerCamelCase : Optional[Any] = torch.load(_lowerCamelCase , map_location=_lowerCamelCase ) if ".ckpt" in ckpt_path: _lowerCamelCase : Optional[Any] = sd["state_dict"] model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase ) model.to(_lowerCamelCase ) del sd return model def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = model.encode(_lowerCamelCase ) print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" ) _lowerCamelCase : List[str] = model.decode(_lowerCamelCase ) return xrec def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> str: '''simple docstring''' _lowerCamelCase, _lowerCamelCase : List[str] = string.rsplit("." , 1 ) if reload: _lowerCamelCase : int = importlib.import_module(_lowerCamelCase ) importlib.reload(_lowerCamelCase ) return getattr(importlib.import_module(_lowerCamelCase , package=_lowerCamelCase ) , cls ) def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' if "target" not in config: raise KeyError("Expected key `target` to instantiate." ) return get_obj_from_str(config["target"] )(**config.get("params" , {} ) ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=True ) -> Dict: '''simple docstring''' _lowerCamelCase : Optional[Any] = instantiate_from_config(_lowerCamelCase ) if sd is not None: model.load_state_dict(_lowerCamelCase ) if gpu: model.cuda() if eval_mode: model.eval() return {"model": model} def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any: '''simple docstring''' if ckpt: _lowerCamelCase : Union[str, Any] = torch.load(_lowerCamelCase , map_location="cpu" ) _lowerCamelCase : Dict = pl_sd["global_step"] print(F"""loaded model from global step {global_step}.""" ) else: _lowerCamelCase : List[Any] = {"state_dict": None} _lowerCamelCase : List[Any] = None _lowerCamelCase : Optional[Any] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=_lowerCamelCase , eval_mode=_lowerCamelCase )["model"] return model, global_step
340
"""simple docstring""" _lowerCAmelCase : Tuple = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Any = [False] * len(_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = [s] _lowerCamelCase : str = True while queue: _lowerCamelCase : Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_lowerCamelCase ) _lowerCamelCase : Any = True _lowerCamelCase : Any = u return visited[t] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : List[str] = [-1] * (len(_lowerCamelCase )) _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : List[str] = [i[:] for i in graph] # Record original cut, copy. while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Any = float("Inf" ) _lowerCamelCase : Dict = sink while s != source: # Find the minimum value in select path _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , graph[parent[s]][s] ) _lowerCamelCase : Union[str, Any] = parent[s] max_flow += path_flow _lowerCamelCase : Optional[Any] = sink while v != source: _lowerCamelCase : Union[str, Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _lowerCamelCase : List[str] = parent[v] for i in range(len(_lowerCamelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
340
1
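The min-cut routine in the record above searches for augmenting paths with BFS; below is a minimal self-contained sketch of the same Edmonds-Karp idea, on an illustrative 3-node graph:

from collections import deque

def max_flow(cap, s, t):
    """Edmonds-Karp: repeatedly push flow along shortest augmenting paths."""
    n = len(cap)
    flow = 0
    while True:
        parent = [-1] * n
        parent[s] = s
        q = deque([s])
        while q and parent[t] == -1:
            u = q.popleft()
            for v in range(n):
                if parent[v] == -1 and cap[u][v] > 0:
                    parent[v] = u
                    q.append(v)
        if parent[t] == -1:
            return flow
        # Find the bottleneck along the path, then push it through.
        path_flow, v = float("inf"), t
        while v != s:
            path_flow = min(path_flow, cap[parent[v]][v])
            v = parent[v]
        v = t
        while v != s:
            cap[parent[v]][v] -= path_flow
            cap[v][parent[v]] += path_flow
            v = parent[v]
        flow += path_flow

print(max_flow([[0, 3, 2], [0, 0, 2], [0, 0, 0]], 0, 2))  # 4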
"""simple docstring""" import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class A_ ( unittest.TestCase ): lowerCAmelCase__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : Tuple = hf_hub_download( repo_id="nateraw/video-demo" ,filename="archery.mp4" ,repo_type="dataset" ) _lowerCamelCase : List[Any] = VideoClassificationPipeline(model=__lowerCAmelCase ,image_processor=__lowerCAmelCase ,top_k=2 ) _lowerCamelCase : Tuple = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: List[str] ): '''simple docstring''' for example in examples: _lowerCamelCase : int = video_classifier(__lowerCAmelCase ) self.assertEqual( __lowerCAmelCase ,[ {"score": ANY(__lowerCAmelCase ), "label": ANY(__lowerCAmelCase )}, {"score": ANY(__lowerCAmelCase ), "label": ANY(__lowerCAmelCase )}, ] ,) @require_torch def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Dict = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" _lowerCamelCase : str = VideoMAEFeatureExtractor( size={"shortest_edge": 10} ,crop_size={"height": 10, "width": 10} ) _lowerCamelCase : Dict = pipeline( "video-classification" ,model=__lowerCAmelCase ,feature_extractor=__lowerCAmelCase ,frame_sampling_rate=4 ) _lowerCamelCase : int = hf_hub_download(repo_id="nateraw/video-demo" ,filename="archery.mp4" ,repo_type="dataset" ) _lowerCamelCase : List[Any] = video_classifier(__lowerCAmelCase ,top_k=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ,decimals=4 ) ,[{"score": 0.51_99, "label": "LABEL_0"}, {"score": 0.48_01, "label": "LABEL_1"}] ,) _lowerCamelCase : Tuple = video_classifier( [ video_file_path, video_file_path, ] ,top_k=2 ,) self.assertEqual( nested_simplify(__lowerCAmelCase ,decimals=4 ) ,[ [{"score": 0.51_99, "label": "LABEL_0"}, {"score": 0.48_01, "label": "LABEL_1"}], [{"score": 0.51_99, "label": "LABEL_0"}, {"score": 0.48_01, "label": "LABEL_1"}], ] ,) @require_tf def _lowercase ( self: Dict ): '''simple docstring''' pass
340
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[str] = { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''', '''umberto-commoncrawl-cased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json''' ), '''umberto-wikipedia-uncased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json''' ), } class A_ ( _a ): lowerCAmelCase__ = 'camembert' def __init__( self: Tuple ,__lowerCAmelCase: Union[str, Any]=30_522 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Union[str, Any]=12 ,__lowerCAmelCase: int=12 ,__lowerCAmelCase: Optional[int]=3_072 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: int=512 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Any=1 ,__lowerCAmelCase: Any=0 ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Any="absolute" ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Tuple=None ,**__lowerCAmelCase: Dict ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[str] = vocab_size _lowerCamelCase : Any = hidden_size _lowerCamelCase : Union[str, Any] = num_hidden_layers _lowerCamelCase : str = num_attention_heads _lowerCamelCase : List[Any] = hidden_act _lowerCamelCase : int = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = max_position_embeddings _lowerCamelCase : str = type_vocab_size _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Union[str, Any] = layer_norm_eps _lowerCamelCase : Tuple = position_embedding_type _lowerCamelCase : List[Any] = use_cache _lowerCamelCase : Dict = classifier_dropout class A_ ( _a ): @property def _lowercase ( self: Any ): '''simple docstring''' if self.task == "multiple-choice": _lowerCamelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"} else: _lowerCamelCase : int = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
340
1
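A minimal sketch of building a config like the CamemBERT one above; it assumes a transformers install and uses the public CamembertConfig class:

from transformers import CamembertConfig

# Defaults mirror the __init__ signature above; override a couple of fields.
config = CamembertConfig(hidden_size=768, num_attention_heads=12)
print(config.hidden_size, config.num_attention_heads)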
"""simple docstring""" from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass _lowerCAmelCase : str = (3, 9, -11, 0, 7, 5, 1, -1) _lowerCAmelCase : Union[str, Any] = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class A_ : lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 class A_ : def __init__( self: int ,__lowerCAmelCase: Iterable[int] ): '''simple docstring''' _lowerCamelCase : Node | None = None for i in sorted(__lowerCAmelCase ,reverse=__lowerCAmelCase ): _lowerCamelCase : List[str] = Node(__lowerCAmelCase ,self.head ) def __iter__( self: List[str] ): '''simple docstring''' _lowerCamelCase : Optional[Any] = self.head while node: yield node.data _lowerCamelCase : Tuple = node.next_node def __len__( self: Optional[Any] ): '''simple docstring''' return sum(1 for _ in self ) def __str__( self: Dict ): '''simple docstring''' return " -> ".join([str(__lowerCAmelCase ) for node in self] ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> SortedLinkedList: '''simple docstring''' return SortedLinkedList(list(_lowerCamelCase ) + list(_lowerCamelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase : Any = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
340
"""simple docstring""" from collections import defaultdict def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : str = True for v in tree[start]: if v not in visited: ret += dfs(_lowerCamelCase ) if ret % 2 == 0: cuts.append(_lowerCamelCase ) return ret def lowerCamelCase_( ) -> int: '''simple docstring''' dfs(1 ) if __name__ == "__main__": _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = 10, 9 _lowerCAmelCase : str = defaultdict(list) _lowerCAmelCase : dict[int, bool] = {} _lowerCAmelCase : list[int] = [] _lowerCAmelCase : Any = 0 _lowerCAmelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
340
1
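The sorted-linked-list merge above is equivalent to merging two ascending sequences; a short sketch with the same test data, using only the standard library:

import heapq

odd = sorted((3, 9, -11, 0, 7, 5, 1, -1))
even = sorted((4, 6, 2, 0, 8, 10, 3, -2))
print(" -> ".join(str(x) for x in heapq.merge(odd, even)))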
"""simple docstring""" import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging _lowerCAmelCase : Any = logging.get_logger(__name__) class A_ ( _a ): lowerCAmelCase__ = 'linear' lowerCAmelCase__ = 'cosine' lowerCAmelCase__ = 'cosine_with_restarts' lowerCAmelCase__ = 'polynomial' lowerCAmelCase__ = 'constant' lowerCAmelCase__ = 'constant_with_warmup' lowerCAmelCase__ = 'piecewise_constant' def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = -1 ) -> List[str]: '''simple docstring''' return LambdaLR(_lowerCamelCase , lambda _lowerCamelCase : 1 , last_epoch=_lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = -1 ) -> int: '''simple docstring''' def lr_lambda(_lowerCamelCase ): if current_step < num_warmup_steps: return float(_lowerCamelCase ) / float(max(1.0 , _lowerCamelCase ) ) return 1.0 return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = -1 ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : Optional[int] = {} _lowerCamelCase : Optional[Any] = step_rules.split("," ) for rule_str in rule_list[:-1]: _lowerCamelCase, _lowerCamelCase : Optional[Any] = rule_str.split(":" ) _lowerCamelCase : int = int(_lowerCamelCase ) _lowerCamelCase : Optional[Any] = float(_lowerCamelCase ) _lowerCamelCase : Optional[int] = value _lowerCamelCase : str = float(rule_list[-1] ) def create_rules_function(_lowerCamelCase , _lowerCamelCase ): def rule_func(_lowerCamelCase ) -> float: _lowerCamelCase : List[Any] = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(_lowerCamelCase ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func _lowerCamelCase : List[str] = create_rules_function(_lowerCamelCase , _lowerCamelCase ) return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=-1 ) -> Union[str, Any]: '''simple docstring''' def lr_lambda(_lowerCamelCase ): if current_step < num_warmup_steps: return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0.5 , _lowerCamelCase = -1 ) -> Union[str, Any]: '''simple docstring''' def lr_lambda(_lowerCamelCase ): if current_step < num_warmup_steps: return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) ) _lowerCamelCase : Union[str, Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_lowerCamelCase ) * 2.0 * progress )) ) return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = -1 ) -> Any: '''simple docstring''' def lr_lambda(_lowerCamelCase ): if current_step < num_warmup_steps: return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) ) _lowerCamelCase : Tuple = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return 
max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_lowerCamelCase ) * progress) % 1.0) )) ) return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1e-7 , _lowerCamelCase=1.0 , _lowerCamelCase=-1 ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : int = optimizer.defaults["lr"] if not (lr_init > lr_end): raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" ) def lr_lambda(_lowerCamelCase ): if current_step < num_warmup_steps: return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: _lowerCamelCase : Any = lr_init - lr_end _lowerCamelCase : Union[str, Any] = num_training_steps - num_warmup_steps _lowerCamelCase : List[Any] = 1 - (current_step - num_warmup_steps) / decay_steps _lowerCamelCase : Optional[Any] = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) _lowerCAmelCase : Dict = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 1.0 , _lowerCamelCase = -1 , ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Union[str, Any] = SchedulerType(_lowerCamelCase ) _lowerCamelCase : Any = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(_lowerCamelCase , last_epoch=_lowerCamelCase ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(_lowerCamelCase , step_rules=_lowerCamelCase , last_epoch=_lowerCamelCase ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(_lowerCamelCase , num_warmup_steps=_lowerCamelCase , last_epoch=_lowerCamelCase ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( _lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , num_cycles=_lowerCamelCase , last_epoch=_lowerCamelCase , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( _lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , power=_lowerCamelCase , last_epoch=_lowerCamelCase , ) return schedule_func( _lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , last_epoch=_lowerCamelCase )
340
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder _lowerCAmelCase : Optional[int] = '''__DUMMY_TRANSFORMERS_USER__''' _lowerCAmelCase : Dict = '''Dummy User''' _lowerCAmelCase : Optional[int] = '''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''' _lowerCAmelCase : Tuple = '''https://hub-ci.huggingface.co''' _lowerCAmelCase : Any = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}''' _lowerCAmelCase : Tuple = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}''' _lowerCAmelCase : Dict = Path('''~/.huggingface/hub_ci_token''').expanduser() @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' monkeypatch.setattr("datasets.config.HF_ENDPOINT" , _lowerCamelCase ) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' HfFolder.save_token(_lowerCamelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="session" ) def lowerCamelCase_( ) -> str: '''simple docstring''' return HfApi(endpoint=_lowerCamelCase ) @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Dict = HfFolder.get_token() HfFolder.save_token(_lowerCamelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' def _cleanup_repo(_lowerCamelCase ): hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) return _cleanup_repo @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> List[str]: '''simple docstring''' @contextmanager def _temporary_repo(_lowerCamelCase ): try: yield repo_id finally: cleanup_repo(_lowerCamelCase ) return _temporary_repo @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Tuple = F"""repo_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[str] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data/text_data.txt" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : Dict = 
F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Any = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[Any] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
340
1
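Each scheduler in the record above boils down to a multiplier function handed to LambdaLR; a minimal sketch of the linear warmup-then-decay case (step counts are illustrative):

def linear_lr(step: int, warmup: int = 100, total: int = 1000) -> float:
    # Ramp up linearly during warmup, then decay linearly to zero.
    if step < warmup:
        return step / max(1, warmup)
    return max(0.0, (total - step) / max(1, total - warmup))

print(linear_lr(50), linear_lr(100), linear_lr(1000))  # 0.5 1.0 0.0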
"""simple docstring""" import contextlib import os import sqlitea import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' assert isinstance(_lowerCamelCase , _lowerCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Dict = tmp_path / "cache" _lowerCamelCase : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _lowerCamelCase : Tuple = SqlDatasetReader( "dataset" , "sqlite:///" + sqlite_path , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read() _check_sql_dataset(_lowerCamelCase , _lowerCamelCase ) @require_sqlalchemy @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : Optional[int] = tmp_path / "cache" _lowerCamelCase : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} _lowerCamelCase : Tuple = features.copy() if features else default_expected_features _lowerCamelCase : Dict = ( Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) _lowerCamelCase : Optional[Any] = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read() _check_sql_dataset(_lowerCamelCase , _lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase ) -> Dict: '''simple docstring''' with contextlib.closing(sqlitea.connect(_lowerCamelCase ) ) as con: _lowerCamelCase : Any = con.cursor() cur.execute("SELECT * FROM dataset" ) for row in cur: yield row @require_sqlalchemy def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Optional[Any] = tmp_path / "cache" _lowerCamelCase : Optional[int] = os.path.join(_lowerCamelCase , "tmp.sql" ) _lowerCamelCase : Optional[Any] = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=_lowerCamelCase ).read() SqlDatasetWriter(_lowerCamelCase , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=1 ).write() _lowerCamelCase : List[str] = iter_sql_file(_lowerCamelCase ) _lowerCamelCase : List[Any] = iter_sql_file(_lowerCamelCase ) for rowa, rowa in zip(_lowerCamelCase , _lowerCamelCase ): assert rowa == rowa @require_sqlalchemy def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Optional[Any] = tmp_path / "cache" _lowerCamelCase : Dict = os.path.join(_lowerCamelCase , "tmp.sql" ) _lowerCamelCase : 
List[Any] = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=_lowerCamelCase ).read() SqlDatasetWriter(_lowerCamelCase , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=2 ).write() _lowerCamelCase : Any = iter_sql_file(_lowerCamelCase ) _lowerCamelCase : Optional[Any] = iter_sql_file(_lowerCamelCase ) for rowa, rowa in zip(_lowerCamelCase , _lowerCamelCase ): assert rowa == rowa @require_sqlalchemy def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Dict = tmp_path / "cache" _lowerCamelCase : List[Any] = os.path.join(_lowerCamelCase , "tmp.sql" ) _lowerCamelCase : Dict = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=_lowerCamelCase ).read() with pytest.raises(_lowerCamelCase ): SqlDatasetWriter(_lowerCamelCase , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=0 ).write()
340
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) class A_ ( _a ): def __init__( self: List[Any] ,__lowerCAmelCase: Union[List[ControlNetModel], Tuple[ControlNetModel]] ): '''simple docstring''' super().__init__() _lowerCamelCase : Tuple = nn.ModuleList(__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Union[torch.Tensor, float, int] ,__lowerCAmelCase: torch.Tensor ,__lowerCAmelCase: List[torch.tensor] ,__lowerCAmelCase: List[float] ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[Dict[str, Any]] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(__lowerCAmelCase ,__lowerCAmelCase ,self.nets ) ): _lowerCamelCase, _lowerCamelCase : Union[str, Any] = controlnet( __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,) # merge samples if i == 0: _lowerCamelCase, _lowerCamelCase : Optional[Any] = down_samples, mid_sample else: _lowerCamelCase : Optional[int] = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__lowerCAmelCase ,__lowerCAmelCase ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Union[str, os.PathLike] ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Callable = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[str] = None ,): '''simple docstring''' _lowerCamelCase : List[Any] = 0 _lowerCamelCase : str = save_directory for controlnet in self.nets: controlnet.save_pretrained( __lowerCAmelCase ,is_main_process=__lowerCAmelCase ,save_function=__lowerCAmelCase ,safe_serialization=__lowerCAmelCase ,variant=__lowerCAmelCase ,) idx += 1 _lowerCamelCase : int = model_path_to_save + F"""_{idx}""" @classmethod def _lowercase ( cls: Any ,__lowerCAmelCase: Optional[Union[str, os.PathLike]] ,**__lowerCAmelCase: int ): '''simple docstring''' _lowerCamelCase : int = 0 _lowerCamelCase : str = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... _lowerCamelCase : Dict = pretrained_model_path while os.path.isdir(__lowerCAmelCase ): _lowerCamelCase : List[Any] = ControlNetModel.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase ) controlnets.append(__lowerCAmelCase ) idx += 1 _lowerCamelCase : Tuple = pretrained_model_path + F"""_{idx}""" logger.info(F"""{len(__lowerCAmelCase )} controlnets loaded from {pretrained_model_path}.""" ) if len(__lowerCAmelCase ) == 0: raise ValueError( F"""No ControlNets found under {os.path.dirname(__lowerCAmelCase )}. Expected at least {pretrained_model_path + '_0'}.""" ) return cls(__lowerCAmelCase )
340
1
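A minimal sketch of the SQLite round-trip those tests exercise; it assumes datasets and sqlalchemy are installed and writes a throwaway local file:

from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
# to_sql mirrors pandas' API: table name first, then a SQLAlchemy connection URI.
ds.to_sql("dataset", "sqlite:///example.sqlite")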
"""simple docstring""" from collections import defaultdict def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : str = True for v in tree[start]: if v not in visited: ret += dfs(_lowerCamelCase ) if ret % 2 == 0: cuts.append(_lowerCamelCase ) return ret def lowerCamelCase_( ) -> int: '''simple docstring''' dfs(1 ) if __name__ == "__main__": _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = 10, 9 _lowerCAmelCase : str = defaultdict(list) _lowerCAmelCase : dict[int, bool] = {} _lowerCAmelCase : list[int] = [] _lowerCAmelCase : Any = 0 _lowerCAmelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
340
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : int = logging.get_logger(__name__) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]: '''simple docstring''' _lowerCamelCase : Tuple = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("head" ): _lowerCamelCase : Tuple = "segformer.encoder." + key if key.startswith("backbone" ): _lowerCamelCase : Any = key.replace("backbone" , "segformer.encoder" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _lowerCamelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )] _lowerCamelCase : int = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_lowerCamelCase )-1}""" ) if "norm" in key: _lowerCamelCase : Optional[Any] = key.replace("norm" , "layer_norm" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _lowerCamelCase : Dict = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )] _lowerCamelCase : Tuple = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_lowerCamelCase )-1}""" ) if "layer_norm1" in key: _lowerCamelCase : Union[str, Any] = key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: _lowerCamelCase : int = key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 _lowerCamelCase : Union[str, Any] = key[key.find("block" ) + len("block" )] _lowerCamelCase : Optional[Any] = key.replace(F"""block{idx}""" , F"""block.{int(_lowerCamelCase )-1}""" ) if "attn.q" in key: _lowerCamelCase : Optional[int] = key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: _lowerCamelCase : List[str] = key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: _lowerCamelCase : Tuple = key.replace("attn" , "attention.self" ) if "fc1" in key: _lowerCamelCase : Optional[Any] = key.replace("fc1" , "dense1" ) if "fc2" in key: _lowerCamelCase : Dict = key.replace("fc2" , "dense2" ) if "linear_pred" in key: _lowerCamelCase : int = key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: _lowerCamelCase : str = key.replace("linear_fuse.conv" , "linear_fuse" ) _lowerCamelCase : Optional[Any] = key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _lowerCamelCase : Union[str, Any] = key[key.find("linear_c" ) + len("linear_c" )] _lowerCamelCase : Optional[int] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_lowerCamelCase )-1}""" ) if key.startswith("head" ): _lowerCamelCase : List[str] = key.replace("head" , "classifier" ) _lowerCamelCase : Union[str, Any] = value return new_state_dict def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" ) _lowerCamelCase : Optional[Any] = 
state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict _lowerCamelCase : int = kv_weight[ : config.hidden_sizes[i], : ] _lowerCamelCase : int = kv_bias[: config.hidden_sizes[i]] _lowerCamelCase : Optional[int] = kv_weight[ config.hidden_sizes[i] :, : ] _lowerCamelCase : Optional[Any] = kv_bias[ config.hidden_sizes[i] : ] def lowerCamelCase_( ) -> Dict: '''simple docstring''' _lowerCamelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCamelCase : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return image @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : Any = SegformerConfig() _lowerCamelCase : int = False # set attributes based on model_name _lowerCamelCase : Any = "huggingface/label-files" if "segformer" in model_name: _lowerCamelCase : str = model_name[len("segformer." ) : len("segformer." ) + 2] if "ade" in model_name: _lowerCamelCase : str = 150 _lowerCamelCase : Dict = "ade20k-id2label.json" _lowerCamelCase : Dict = (1, 150, 128, 128) elif "city" in model_name: _lowerCamelCase : List[str] = 19 _lowerCamelCase : Tuple = "cityscapes-id2label.json" _lowerCamelCase : Tuple = (1, 19, 128, 128) else: raise ValueError(F"""Model {model_name} not supported""" ) elif "mit" in model_name: _lowerCamelCase : List[str] = True _lowerCamelCase : Tuple = model_name[4:6] _lowerCamelCase : Tuple = 1000 _lowerCamelCase : List[Any] = "imagenet-1k-id2label.json" _lowerCamelCase : List[Any] = (1, 1000) else: raise ValueError(F"""Model {model_name} not supported""" ) # set config attributes _lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _lowerCamelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase : Optional[Any] = idalabel _lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : int = 256 elif size == "b2": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : List[Any] = 768 _lowerCamelCase : Any = [3, 4, 6, 3] elif size == "b3": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : Union[str, Any] = 768 _lowerCamelCase : Optional[Any] = [3, 4, 18, 3] elif size == "b4": _lowerCamelCase : str = [64, 128, 320, 512] _lowerCamelCase : Optional[Any] = 768 _lowerCamelCase : Dict = [3, 8, 27, 3] elif size == "b5": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : Tuple = 768 _lowerCamelCase : Tuple = [3, 6, 40, 3] else: raise ValueError(F"""Size {size} not supported""" ) # load image processor (only resize + normalize) _lowerCamelCase : Dict = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase ) # prepare image _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Dict = image_processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values logger.info(F"""Converting model {model_name}...""" ) # load original state dict if encoder_only: _lowerCamelCase : Tuple = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) ) else: _lowerCamelCase : int = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )["state_dict"] # rename keys _lowerCamelCase : str = rename_keys(_lowerCamelCase , 
encoder_only=_lowerCamelCase ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(_lowerCamelCase , _lowerCamelCase ) # create HuggingFace model and load state dict if encoder_only: _lowerCamelCase : Tuple = False _lowerCamelCase : Optional[int] = SegformerForImageClassification(_lowerCamelCase ) else: _lowerCamelCase : List[str] = SegformerForSemanticSegmentation(_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) model.eval() # forward pass _lowerCamelCase : Any = model(_lowerCamelCase ) _lowerCamelCase : Dict = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": _lowerCamelCase : str = torch.tensor( [ [[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]], [[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]], [[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]], [[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]], [[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": _lowerCamelCase : int = torch.tensor( [ [[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]], [[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]], [[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": _lowerCamelCase : Optional[Any] = torch.tensor( [ [[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]], [[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]], [[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]], [[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]], [[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]], [[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]], 
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": _lowerCamelCase : Dict = torch.tensor( [ [[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]], [[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]], [[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": _lowerCamelCase : Optional[int] = torch.tensor( [ [[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]], [[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]], [[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [ [-1.13_72e01, -1.27_87e01, -1.34_77e01], [-1.25_36e01, -1.41_94e01, -1.44_09e01], [-1.32_17e01, -1.48_88e01, -1.53_27e01], ], [ [-1.47_91e01, -1.71_22e01, -1.82_77e01], [-1.71_63e01, -1.91_92e01, -1.95_33e01], [-1.78_97e01, -1.99_91e01, -2.03_15e01], ], [ [7.67_23e-01, 4.19_21e-01, -7.78_78e-02], [4.77_72e-01, 9.55_57e-03, -2.80_82e-01], [3.60_32e-01, -2.48_26e-01, -5.11_68e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": _lowerCamelCase : Union[str, Any] = torch.tensor( [ [[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]], [[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]], [[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": _lowerCamelCase : List[Any] = torch.tensor( [ [[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]], [[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]], [[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]], [[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]], [[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": _lowerCamelCase : Any = torch.tensor( [ [[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]], [[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]], [[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]], ] ) 
elif model_name == "segformer.b4.1024x1024.city.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]], [[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]], [[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": _lowerCamelCase : str = torch.tensor( [ [[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]], [[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]], [[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]], ] ) else: _lowerCamelCase : Dict = logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-2 ) # finally, save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) image_processor.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''segformer.b0.512x512.ade.160k''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _lowerCAmelCase : str = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
340
1
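The read_in_k_v step in the record above splits a fused key/value projection matrix; a toy illustration with an assumed hidden size:

import torch

hidden = 4
kv_weight = torch.randn(2 * hidden, hidden)  # keys stacked on top of values
k_weight, v_weight = kv_weight[:hidden, :], kv_weight[hidden:, :]
print(k_weight.shape, v_weight.shape)  # torch.Size([4, 4]) twice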
"""simple docstring""" def lowerCamelCase_( ) -> int: '''simple docstring''' return 1 def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' return 0 if x < 0 else two_pence(x - 2 ) + one_pence() def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' return 0 if x < 0 else five_pence(x - 5 ) + two_pence(_lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(_lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(_lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(_lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(_lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' return 0 if x < 0 else two_pound(x - 200 ) + one_pound(_lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase = 200 ) -> int: '''simple docstring''' return two_pound(_lowerCamelCase ) if __name__ == "__main__": print(solution(int(input().strip())))
340
"""simple docstring""" _lowerCAmelCase : dict[tuple[int, int, int], int] = {} def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on _lowerCamelCase : Optional[int] = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one _lowerCamelCase : int = _calculate(days - 1 , _lowerCamelCase , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 _lowerCamelCase : Tuple = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter _lowerCamelCase : str = _calculate(days - 1 , _lowerCamelCase , 0 ) _lowerCamelCase : List[Any] = state_late + state_absent + state_ontime _lowerCamelCase : int = prizestrings return prizestrings def lowerCamelCase_( _lowerCamelCase = 30 ) -> int: '''simple docstring''' return _calculate(_lowerCamelCase , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
340
1
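An equivalent bottom-up formulation of the recursive coin-combination count above; it reproduces the same result for 200p:

def coin_ways(target: int = 200, coins=(1, 2, 5, 10, 20, 50, 100, 200)) -> int:
    # ways[a] counts the combinations summing to `a` using the coins seen so far.
    ways = [1] + [0] * target
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]

print(coin_ways())  # 73682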
"""simple docstring""" import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class A_ ( _a ): def __init__( self: Tuple ,__lowerCAmelCase: NestedDataStructureLike[PathLike] ,__lowerCAmelCase: Optional[NamedSplit] = None ,__lowerCAmelCase: Optional[Features] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: Any ,): '''simple docstring''' super().__init__( __lowerCAmelCase ,split=__lowerCAmelCase ,features=__lowerCAmelCase ,cache_dir=__lowerCAmelCase ,keep_in_memory=__lowerCAmelCase ,streaming=__lowerCAmelCase ,num_proc=__lowerCAmelCase ,**__lowerCAmelCase ,) _lowerCamelCase : Optional[int] = field _lowerCamelCase : Dict = path_or_paths if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else {self.split: path_or_paths} _lowerCamelCase : Dict = Json( cache_dir=__lowerCAmelCase ,data_files=__lowerCAmelCase ,features=__lowerCAmelCase ,field=__lowerCAmelCase ,**__lowerCAmelCase ,) def _lowercase ( self: List[str] ): '''simple docstring''' if self.streaming: _lowerCamelCase : int = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _lowerCamelCase : Any = None _lowerCamelCase : Union[str, Any] = None _lowerCamelCase : Tuple = None _lowerCamelCase : int = None self.builder.download_and_prepare( download_config=__lowerCAmelCase ,download_mode=__lowerCAmelCase ,verification_mode=__lowerCAmelCase ,base_path=__lowerCAmelCase ,num_proc=self.num_proc ,) _lowerCamelCase : Optional[Any] = self.builder.as_dataset( split=self.split ,verification_mode=__lowerCAmelCase ,in_memory=self.keep_in_memory ) return dataset class A_ : def __init__( self: str ,__lowerCAmelCase: Dataset ,__lowerCAmelCase: Union[PathLike, BinaryIO] ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: str ,): '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" ) _lowerCamelCase : List[Any] = dataset _lowerCamelCase : Optional[Any] = path_or_buf _lowerCamelCase : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _lowerCamelCase : Dict = num_proc _lowerCamelCase : List[Any] = "utf-8" _lowerCamelCase : Any = to_json_kwargs def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Any = self.to_json_kwargs.pop("path_or_buf" ,__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = self.to_json_kwargs.pop("orient" ,"records" ) _lowerCamelCase : Optional[Any] = self.to_json_kwargs.pop("lines" ,True if orient == "records" else False ) _lowerCamelCase : Union[str, Any] = self.to_json_kwargs.pop("index" ,False if orient in ["split", "table"] else True ) _lowerCamelCase : int = self.to_json_kwargs.pop("compression" ,__lowerCAmelCase ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" ) if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf ,"wb" ,compression=__lowerCAmelCase ) as buffer: _lowerCamelCase : Union[str, Any] = 
self._write(file_obj=__lowerCAmelCase ,orient=__lowerCAmelCase ,lines=__lowerCAmelCase ,index=__lowerCAmelCase ,**self.to_json_kwargs ) else: if compression: raise NotImplementedError( F"""The compression parameter is not supported when writing to a buffer, but compression={compression}""" " was passed. Please provide a local path instead." ) _lowerCamelCase : List[str] = self._write( file_obj=self.path_or_buf ,orient=__lowerCAmelCase ,lines=__lowerCAmelCase ,index=__lowerCAmelCase ,**self.to_json_kwargs ) return written def _lowercase ( self: int ,__lowerCAmelCase: Any ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = args _lowerCamelCase : Optional[Any] = query_table( table=self.dataset.data ,key=slice(__lowerCAmelCase ,offset + self.batch_size ) ,indices=self.dataset._indices ,) _lowerCamelCase : str = batch.to_pandas().to_json( path_or_buf=__lowerCAmelCase ,orient=__lowerCAmelCase ,lines=__lowerCAmelCase ,index=__lowerCAmelCase ,**__lowerCAmelCase ) if not json_str.endswith("\n" ): json_str += "\n" return json_str.encode(self.encoding ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: BinaryIO ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Dict ,): '''simple docstring''' _lowerCamelCase : Dict = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 ,len(self.dataset ) ,self.batch_size ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,): _lowerCamelCase : int = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(__lowerCAmelCase ) else: _lowerCamelCase, _lowerCamelCase : List[str] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json ,[(offset, orient, lines, index, to_json_kwargs) for offset in range(0 ,__lowerCAmelCase ,__lowerCAmelCase )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,): written += file_obj.write(__lowerCAmelCase ) return written
340
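The reader/writer pair above backs the public JSON entry points of `datasets`. A minimal round-trip sketch (file names are illustrative placeholders):

from datasets import Dataset

# Read newline-delimited JSON into an Arrow-backed dataset.
ds = Dataset.from_json("train.jsonl")

# Export it again; orient="records" with lines=True is the default shown above,
# and num_proc fans batches out to worker processes as in the _write method.
ds.to_json("export.jsonl", num_proc=2)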
"""simple docstring""" from __future__ import annotations def lowerCamelCase_( _lowerCamelCase ) -> bool: '''simple docstring''' _lowerCamelCase : int = str(_lowerCamelCase ) return len(_lowerCamelCase ) == 9 and set(_lowerCamelCase ) == set("123456789" ) def lowerCamelCase_( ) -> int | None: '''simple docstring''' for base_num in range(9999 , 4999 , -1 ): _lowerCamelCase : Union[str, Any] = 100002 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate for base_num in range(333 , 99 , -1 ): _lowerCamelCase : Tuple = 1002003 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate return None if __name__ == "__main__": print(f'''{solution() = }''')
340
1
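The two multipliers in the sample above encode digit concatenation: for a four-digit n, concat(n, 2n) = n * 10**5 + 2n = 100002 * n, and for a three-digit n, concat(n, 2n, 3n) = 1002003 * n. A quick identity check with an arbitrary base:

n = 9327
assert 100002 * n == int(str(n) + str(2 * n)) == 932718654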
"""simple docstring""" from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand _lowerCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCamelCase_( _lowerCamelCase ) -> Union[str, Any]: '''simple docstring''' if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(_lowerCamelCase ): return ext raise Exception( F"""Unable to determine file format from file extension {path}. """ F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" ) def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : str = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) _lowerCamelCase : List[Any] = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format _lowerCamelCase : Optional[Any] = PipelineDataFormat.from_str( format=_lowerCamelCase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(_lowerCamelCase , _lowerCamelCase ) class A_ ( _a ): def __init__( self: Any ,__lowerCAmelCase: Pipeline ,__lowerCAmelCase: PipelineDataFormat ): '''simple docstring''' _lowerCamelCase : List[str] = nlp _lowerCamelCase : str = reader @staticmethod def _lowercase ( __lowerCAmelCase: ArgumentParser ): '''simple docstring''' _lowerCamelCase : List[str] = parser.add_parser("run" ,help="Run a pipeline through the CLI" ) run_parser.add_argument("--task" ,choices=get_supported_tasks() ,help="Task to run" ) run_parser.add_argument("--input" ,type=__lowerCAmelCase ,help="Path to the file to use for inference" ) run_parser.add_argument("--output" ,type=__lowerCAmelCase ,help="Path to the file that will be used post to write results." ) run_parser.add_argument("--model" ,type=__lowerCAmelCase ,help="Name or path to the model to instantiate." ) run_parser.add_argument("--config" ,type=__lowerCAmelCase ,help="Name or path to the model's config to instantiate." ) run_parser.add_argument( "--tokenizer" ,type=__lowerCAmelCase ,help="Name of the tokenizer to use. (default: same as the model name)" ) run_parser.add_argument( "--column" ,type=__lowerCAmelCase ,help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" ,) run_parser.add_argument( "--format" ,type=__lowerCAmelCase ,default="infer" ,choices=PipelineDataFormat.SUPPORTED_FORMATS ,help="Input format to read from" ,) run_parser.add_argument( "--device" ,type=__lowerCAmelCase ,default=-1 ,help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" ,) run_parser.add_argument("--overwrite" ,action="store_true" ,help="Allow overwriting the output file." 
) run_parser.set_defaults(func=__lowerCAmelCase ) def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : Dict = self._nlp, [] for entry in self._reader: _lowerCamelCase : List[str] = nlp(**__lowerCAmelCase ) if self._reader.is_multi_columns else nlp(__lowerCAmelCase ) if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): outputs.append(__lowerCAmelCase ) else: outputs += output # Saving data if self._nlp.binary_output: _lowerCamelCase : str = self._reader.save_binary(__lowerCAmelCase ) logger.warning(F"""Current pipeline requires output to be in binary format, saving at {binary_path}""" ) else: self._reader.save(__lowerCAmelCase )
340
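A hypothetical invocation of the CLI command registered above (task, file names, and column are illustrative; see PipelineDataFormat.SUPPORTED_FORMATS for the accepted --format values):

transformers-cli run --task text-classification --input data.csv --format csv --column text --output out.csv --device -1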
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class A_ ( _a ): lowerCAmelCase__ = 'char' lowerCAmelCase__ = 'bpe' lowerCAmelCase__ = 'wp' _lowerCAmelCase : List[str] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class A_ ( _a ): lowerCAmelCase__ = ['image_processor', 'char_tokenizer'] lowerCAmelCase__ = 'ViTImageProcessor' lowerCAmelCase__ = 'MgpstrTokenizer' def __init__( self: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Optional[int]=None ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Any = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." ,__lowerCAmelCase ,) _lowerCamelCase : List[Any] = kwargs.pop("feature_extractor" ) _lowerCamelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) _lowerCamelCase : List[str] = tokenizer _lowerCamelCase : str = AutoTokenizer.from_pretrained("gpt2" ) _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(__lowerCAmelCase ,__lowerCAmelCase ) def __call__( self: Optional[int] ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[Any]=None ,**__lowerCAmelCase: Tuple ): '''simple docstring''' if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." 
) if images is not None: _lowerCamelCase : Optional[int] = self.image_processor(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is not None: _lowerCamelCase : int = self.char_tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is None: return inputs elif images is None: return encodings else: _lowerCamelCase : Tuple = encodings["input_ids"] return inputs def _lowercase ( self: int ,__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = sequences _lowerCamelCase : Dict = char_preds.size(0 ) _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._decode_helper(__lowerCAmelCase ,"char" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = self._decode_helper(__lowerCAmelCase ,"bpe" ) _lowerCamelCase, _lowerCamelCase : Tuple = self._decode_helper(__lowerCAmelCase ,"wp" ) _lowerCamelCase : List[str] = [] _lowerCamelCase : str = [] for i in range(__lowerCAmelCase ): _lowerCamelCase : str = [char_scores[i], bpe_scores[i], wp_scores[i]] _lowerCamelCase : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]] _lowerCamelCase : Optional[Any] = scores.index(max(__lowerCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _lowerCamelCase : Tuple = {} _lowerCamelCase : Tuple = final_strs _lowerCamelCase : int = final_scores _lowerCamelCase : str = char_strs _lowerCamelCase : Dict = bpe_strs _lowerCamelCase : int = wp_strs return out def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ): '''simple docstring''' if format == DecodeType.CHARACTER: _lowerCamelCase : int = self.char_decode _lowerCamelCase : List[str] = 1 _lowerCamelCase : Optional[int] = "[s]" elif format == DecodeType.BPE: _lowerCamelCase : Dict = self.bpe_decode _lowerCamelCase : str = 2 _lowerCamelCase : Union[str, Any] = "#" elif format == DecodeType.WORDPIECE: _lowerCamelCase : int = self.wp_decode _lowerCamelCase : List[str] = 102 _lowerCamelCase : List[Any] = "[SEP]" else: raise ValueError(F"""Format {format} is not supported.""" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = [], [] _lowerCamelCase : Any = pred_logits.size(0 ) _lowerCamelCase : int = pred_logits.size(1 ) _lowerCamelCase, _lowerCamelCase : List[Any] = pred_logits.topk(1 ,dim=-1 ,largest=__lowerCAmelCase ,sorted=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_index.view(-1 ,__lowerCAmelCase )[:, 1:] _lowerCamelCase : List[str] = decoder(__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase : str = torch.nn.functional.softmax(__lowerCAmelCase ,dim=2 ).max(dim=2 ) _lowerCamelCase : Any = preds_max_prob[:, 1:] for index in range(__lowerCAmelCase ): _lowerCamelCase : List[Any] = preds_str[index].find(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_str[index][:pred_eos] _lowerCamelCase : Optional[Any] = preds_index[index].cpu().tolist() _lowerCamelCase : List[str] = pred_index.index(__lowerCAmelCase ) if eos_token in pred_index else -1 _lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1] _lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__lowerCAmelCase ) conf_scores.append(__lowerCAmelCase ) return dec_strs, conf_scores def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : str = [seq.replace(" " ,"" ) for seq in self.char_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs def _lowercase ( 
self: List[str] ,__lowerCAmelCase: List[str] ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(__lowerCAmelCase ) def _lowercase ( self: Tuple ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = [seq.replace(" " ,"" ) for seq in self.wp_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs
340
1
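A usage sketch for the processor above, assuming the publicly released MGP-STR checkpoint name; the image path is an illustrative placeholder:

import torch
from PIL import Image
from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")

image = Image.open("word_crop.png").convert("RGB")  # a cropped scene-text image
pixel_values = processor(images=image, return_tensors="pt").pixel_values
with torch.no_grad():
    outputs = model(pixel_values)
# batch_decode scores the char/BPE/wordpiece heads and keeps the best string per image
text = processor.batch_decode(outputs.logits)["generated_text"]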
"""simple docstring""" _lowerCAmelCase : Optional[Any] = [ '''DownloadConfig''', '''DownloadManager''', '''DownloadMode''', '''StreamingDownloadManager''', ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
340
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version _lowerCAmelCase : List[Any] = get_logger(__name__) class A_ : lowerCAmelCase__ = 'dummy_data' lowerCAmelCase__ = 'datasets' lowerCAmelCase__ = False def __init__( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[Version, str] ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[List[Callable]] = None ,): '''simple docstring''' _lowerCamelCase : str = 0 _lowerCamelCase : List[str] = dataset_name _lowerCamelCase : Optional[int] = cache_dir _lowerCamelCase : Optional[int] = use_local_dummy_data _lowerCamelCase : int = config # download_callbacks take a single url as input _lowerCamelCase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _lowerCamelCase : int = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _lowerCamelCase : Tuple = str(__lowerCAmelCase ) # to be downloaded _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Dict = None @property def _lowercase ( self: str ): '''simple docstring''' if self._dummy_file is None: _lowerCamelCase : List[str] = self.download_dummy_data() return self._dummy_file @property def _lowercase ( self: str ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" ,self.config.name ,self.version_name ) # structure is dummy / version_name return os.path.join("dummy" ,self.version_name ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' return os.path.join(self.dummy_data_folder ,"dummy_data.zip" ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Dict = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _lowerCamelCase : Optional[int] = cached_path( __lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=__lowerCAmelCase ,force_extract=__lowerCAmelCase ) return os.path.join(__lowerCAmelCase ,self.dummy_file_name ) @property def _lowercase ( self: Tuple ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file ) @property def _lowercase ( self: List[str] ): '''simple docstring''' if self._bucket_url is None: _lowerCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) ) return self._bucket_url @property def _lowercase ( self: Union[str, Any] ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _lowerCamelCase : Tuple = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _lowerCamelCase : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): return self.create_dummy_data_dict(__lowerCAmelCase ,__lowerCAmelCase ) elif isinstance(__lowerCAmelCase ,(list, tuple) ): return self.create_dummy_data_list(__lowerCAmelCase ,__lowerCAmelCase ) else: return self.create_dummy_data_single(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: Optional[int] ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' return path def _lowercase ( self: Optional[int] ): '''simple docstring''' return {} def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : str = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): for single_url in single_urls: download_callback(__lowerCAmelCase ) else: _lowerCamelCase : Union[str, Any] = single_urls download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : Dict = [os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) for x in single_urls] else: _lowerCamelCase : Union[str, Any] = single_urls _lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) _lowerCamelCase : List[Any] = value # make sure that values are unique if all(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Dict = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _lowerCamelCase : List[str] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,__lowerCAmelCase ) ) for url in data_url ) _lowerCamelCase : Optional[Any] = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _lowerCamelCase : Tuple = [data_url[0]] * len(__lowerCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the 
url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__lowerCAmelCase ) return dummy_data_list def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__lowerCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _lowercase ( self: Optional[Any] ): '''simple docstring''' pass def _lowercase ( self: Optional[int] ): '''simple docstring''' pass def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' def _iter_archive_members(__lowerCAmelCase: Any ): # this preserves the order of the members inside the ZIP archive _lowerCamelCase : Tuple = Path(self.dummy_file ).parent _lowerCamelCase : str = path.relative_to(__lowerCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _lowerCamelCase : Optional[int] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = Path(__lowerCAmelCase ) _lowerCamelCase : int = _iter_archive_members(__lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__lowerCAmelCase ).as_posix(), file_path.open("rb" ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : List[Any] = [paths] for path in paths: if os.path.isfile(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__lowerCAmelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
340
1
"""simple docstring""" import math from numpy import inf from scipy.integrate import quad def lowerCamelCase_( _lowerCamelCase ) -> float: '''simple docstring''' if num <= 0: raise ValueError("math domain error" ) return quad(_lowerCamelCase , 0 , _lowerCamelCase , args=(_lowerCamelCase) )[0] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' return math.pow(_lowerCamelCase , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
340
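Because Γ(n) = (n - 1)! for positive integers and Γ(1/2) = √π, the quadrature above is easy to sanity-check:

from math import isclose, pi, sqrt

assert isclose(gamma_iterative(5), 24.0, rel_tol=1e-6)        # Γ(5) = 4! = 24
assert isclose(gamma_iterative(0.5), sqrt(pi), rel_tol=1e-6)  # Γ(1/2) = √π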
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise TypeError("Undefined for non-integers" ) elif precision < 1: raise ValueError("Undefined for non-natural numbers" ) _lowerCamelCase : int = precision _lowerCamelCase : Dict = ceil(precision / 14 ) _lowerCamelCase : Optional[Any] = 426880 * Decimal(10005 ).sqrt() _lowerCamelCase : int = 1 _lowerCamelCase : Optional[int] = 13591409 _lowerCamelCase : int = Decimal(_lowerCamelCase ) for k in range(1 , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowerCamelCase ) ** 3) linear_term += 545140134 exponential_term *= -262537412640768000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = 50 print(f'''The first {n} digits of pi is: {pi(n)}''')
340
1
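Each Chudnovsky term contributes roughly 14 digits, which is why the sample uses ceil(precision / 14) iterations. A quick sanity check against the known leading digits of pi:

assert pi(20).startswith("3.14159265358979")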
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' return "\n".join( F"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) ) if __name__ == "__main__": print(multiplication_table(number=5, number_of_terms=10))
340
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class A_ ( _a ): lowerCAmelCase__ = 42 lowerCAmelCase__ = None def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=0.9_9_9 , _lowerCamelCase="cosine" , ) -> List[str]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(_lowerCamelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_lowerCamelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) _lowerCamelCase : str = [] for i in range(_lowerCamelCase ): _lowerCamelCase : Any = i / num_diffusion_timesteps _lowerCamelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) ) return torch.tensor(_lowerCamelCase , dtype=torch.floataa ) class A_ ( _a , _a ): @register_to_config def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,): '''simple docstring''' if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) _lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = 1.0 - self.betas _lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 ) _lowerCamelCase : int = torch.tensor(1.0 ) # standard deviation of the initial noise distribution _lowerCamelCase : Tuple = 1.0 # setable values _lowerCamelCase : List[Any] = None _lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() ) _lowerCamelCase : List[str] = variance_type def _lowercase ( self: Any ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ): '''simple docstring''' return sample def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, torch.device] = None ): '''simple docstring''' _lowerCamelCase : str = num_inference_steps _lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) _lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) _lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: str=None ): '''simple docstring''' if prev_timestep is None: _lowerCamelCase : List[str] = t - 1 _lowerCamelCase : Optional[int] = self.alphas_cumprod[t] _lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : List[Any] = self.betas[t] else: _lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from 
https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample _lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: _lowerCamelCase : List[str] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": _lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) ) _lowerCamelCase : str = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler _lowerCamelCase : str = variance.log() _lowerCamelCase : str = beta.log() _lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2 _lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: bool = True ,): '''simple docstring''' _lowerCamelCase : str = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": _lowerCamelCase, _lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 ) else: _lowerCamelCase : List[Any] = None # 1. compute alphas, betas if prev_timestep is None: _lowerCamelCase : List[Any] = t - 1 _lowerCamelCase : Dict = self.alphas_cumprod[t] _lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : List[str] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : Any = self.betas[t] _lowerCamelCase : str = self.alphas[t] else: _lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev _lowerCamelCase : Optional[Any] = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": _lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": _lowerCamelCase : List[Any] = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: _lowerCamelCase : Any = torch.clamp( __lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t _lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _lowerCamelCase : Union[str, Any] = 0 if t > 0: _lowerCamelCase : Dict = randn_tensor( model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device ) _lowerCamelCase : Any = self._get_variance( __lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,) if self.variance_type == "fixed_small_log": _lowerCamelCase : Optional[Any] = variance elif self.variance_type == "learned_range": _lowerCamelCase : Optional[int] = (0.5 * variance).exp() else: raise ValueError( F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" " for the UnCLIPScheduler." ) _lowerCamelCase : Dict = variance * variance_noise _lowerCamelCase : List[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.IntTensor ,): '''simple docstring''' _lowerCamelCase : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype ) _lowerCamelCase : Any = timesteps.to(original_samples.device ) _lowerCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5 _lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : int = sqrt_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5 _lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
340
1
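A minimal denoising-loop sketch for the scheduler above, assuming the diffusers UnCLIPScheduler export; the random tensors stand in for a real UNet and latents:

import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(25)

sample = torch.randn(1, 3, 64, 64)  # placeholder latent
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample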
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def lowerCamelCase_( ) -> Any: '''simple docstring''' _lowerCamelCase : Tuple = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png" _lowerCamelCase : Dict = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("RGB" ) return image def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : List[str] = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") ) # fmt: on return rename_keys def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Optional[Any] = dct.pop(_lowerCamelCase ) _lowerCamelCase : Tuple = val def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Any: '''simple docstring''' for i in 
range(config.vision_config.num_hidden_layers ): # read in original q and v biases _lowerCamelCase : int = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" ) _lowerCamelCase : List[str] = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias in the state dict _lowerCamelCase : Tuple = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) ) _lowerCamelCase : str = qkv_bias def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Dict = 364 if "coco" in model_name else 224 _lowerCamelCase : Union[str, Any] = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: _lowerCamelCase : List[Any] = OPTConfig.from_pretrained("facebook/opt-2.7b" , eos_token_id=_lowerCamelCase ).to_dict() elif "opt-6.7b" in model_name: _lowerCamelCase : Optional[int] = OPTConfig.from_pretrained("facebook/opt-6.7b" , eos_token_id=_lowerCamelCase ).to_dict() elif "t5-xl" in model_name: _lowerCamelCase : Optional[Any] = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _lowerCamelCase : str = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() _lowerCamelCase : Dict = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase ) return config, image_size @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=False ) -> Tuple: '''simple docstring''' _lowerCamelCase : Tuple = ( AutoTokenizer.from_pretrained("facebook/opt-2.7b" ) if "opt" in model_name else AutoTokenizer.from_pretrained("google/flan-t5-xl" ) ) _lowerCamelCase : List[str] = tokenizer("\n" , add_special_tokens=_lowerCamelCase ).input_ids[0] _lowerCamelCase, _lowerCamelCase : Optional[int] = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase ) _lowerCamelCase : List[str] = BlipaForConditionalGeneration(_lowerCamelCase ).eval() _lowerCamelCase : Dict = { "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"), "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"), "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"), "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"), "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"), "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"), "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"), } _lowerCamelCase, _lowerCamelCase : Optional[Any] = model_name_to_original[model_name] # load original model print("Loading original model..." ) _lowerCamelCase : Optional[int] = "cuda" if torch.cuda.is_available() else "cpu" _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = load_model_and_preprocess( name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase ) original_model.eval() print("Done!" 
) # update state dict keys _lowerCamelCase : Tuple = original_model.state_dict() _lowerCamelCase : str = create_rename_keys(_lowerCamelCase ) for src, dest in rename_keys: rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _lowerCamelCase : Optional[Any] = state_dict.pop(_lowerCamelCase ) if key.startswith("Qformer.bert" ): _lowerCamelCase : str = key.replace("Qformer.bert" , "qformer" ) if "attention.self" in key: _lowerCamelCase : Tuple = key.replace("self" , "attention" ) if "opt_proj" in key: _lowerCamelCase : Dict = key.replace("opt_proj" , "language_projection" ) if "t5_proj" in key: _lowerCamelCase : List[str] = key.replace("t5_proj" , "language_projection" ) if key.startswith("opt" ): _lowerCamelCase : int = key.replace("opt" , "language" ) if key.startswith("t5" ): _lowerCamelCase : Any = key.replace("t5" , "language" ) _lowerCamelCase : int = val # read in qv biases read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase, _lowerCamelCase : int = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase ) assert len(_lowerCamelCase ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] _lowerCamelCase : int = load_demo_image() _lowerCamelCase : List[Any] = vis_processors["eval"](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase ) _lowerCamelCase : Optional[int] = tokenizer(["\n"] , return_tensors="pt" ).input_ids.to(_lowerCamelCase ) # create processor _lowerCamelCase : Any = BlipImageProcessor( size={"height": image_size, "width": image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase ) _lowerCamelCase : Dict = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase ) _lowerCamelCase : str = processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values.to(_lowerCamelCase ) # make sure processor creates exact same pixel values assert torch.allclose(_lowerCamelCase , _lowerCamelCase ) original_model.to(_lowerCamelCase ) hf_model.to(_lowerCamelCase ) with torch.no_grad(): if "opt" in model_name: _lowerCamelCase : Tuple = original_model({"image": original_pixel_values, "text_input": [""]} ).logits _lowerCamelCase : List[Any] = hf_model(_lowerCamelCase , _lowerCamelCase ).logits else: _lowerCamelCase : Optional[int] = original_model( {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits _lowerCamelCase : List[Any] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 ) _lowerCamelCase : Optional[Any] = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits assert original_logits.shape == logits.shape print("First values of original logits:" , original_logits[0, :3, :3] ) print("First values of HF logits:" , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": _lowerCamelCase : str = torch.tensor( [[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=_lowerCamelCase ) assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": _lowerCamelCase : Optional[Any] = torch.tensor( [[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=_lowerCamelCase ) else: # cast to same type _lowerCamelCase : str = logits.dtype assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1e-2 ) print("Looks ok!" ) print("Generating a caption..." 
) _lowerCamelCase : List[str] = "" _lowerCamelCase : Dict = tokenizer(_lowerCamelCase , return_tensors="pt" ).input_ids.to(_lowerCamelCase ) _lowerCamelCase : Optional[int] = original_model.generate({"image": original_pixel_values} ) _lowerCamelCase : Dict = hf_model.generate( _lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("Original generation:" , _lowerCamelCase ) _lowerCamelCase : List[Any] = input_ids.shape[1] _lowerCamelCase : Tuple = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase ) _lowerCamelCase : int = [text.strip() for text in output_text] print("HF generation:" , _lowerCamelCase ) if pytorch_dump_folder_path is not None: processor.save_pretrained(_lowerCamelCase ) hf_model.save_pretrained(_lowerCamelCase ) if push_to_hub: processor.push_to_hub(F"""nielsr/{model_name}""" ) hf_model.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() _lowerCAmelCase : Dict = [ '''blip2-opt-2.7b''', '''blip2-opt-6.7b''', '''blip2-opt-2.7b-coco''', '''blip2-opt-6.7b-coco''', '''blip2-flan-t5-xl''', '''blip2-flan-t5-xl-coco''', '''blip2-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''blip2-opt-2.7b''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) _lowerCAmelCase : Optional[Any] = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
340
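Once converted, the checkpoint loads through the standard transformers API. A captioning sketch using one of the officially published BLIP-2 conversions (the image path is a placeholder):

import torch
from PIL import Image
from transformers import Blip2ForConditionalGeneration, Blip2Processor

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")

image = Image.open("merlion.png").convert("RGB")
inputs = processor(images=image, return_tensors="pt")
out = model.generate(**inputs, max_length=30)
print(processor.batch_decode(out, skip_special_tokens=True)[0].strip())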
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Dict = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : str = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' for attribute in key.split("." ): _lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: _lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: _lowerCamelCase : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowerCamelCase : Tuple = value elif weight_type == "weight_g": _lowerCamelCase : List[str] = value elif weight_type == "weight_v": _lowerCamelCase : List[Any] = value elif weight_type == "bias": _lowerCamelCase : str = value elif weight_type == "running_mean": _lowerCamelCase : Optional[int] = value elif weight_type == "running_var": _lowerCamelCase : Optional[Any] = value elif weight_type == "num_batches_tracked": _lowerCamelCase : int = value elif weight_type == "inv_freq": _lowerCamelCase : List[str] = value else: _lowerCamelCase : Optional[Any] = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Dict = [] _lowerCamelCase : Optional[Any] = fairseq_model.state_dict() _lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): _lowerCamelCase : Dict = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , ) _lowerCamelCase : List[Any] = True else: for key, mapped_key in MAPPING.items(): _lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _lowerCamelCase : int = True if "*" in mapped_key: _lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2] _lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase ) if "pos_bias_u" in name: _lowerCamelCase : int = None elif "pos_bias_v" in name: _lowerCamelCase : Any = None elif "weight_g" in name: _lowerCamelCase : Any = "weight_g" elif "weight_v" in name: _lowerCamelCase : Any = "weight_v" elif "bias" in name: _lowerCamelCase : Optional[Any] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj _lowerCamelCase : Dict = "weight" elif "running_mean" in name: _lowerCamelCase : str = "running_mean" elif "inv_freq" in name: _lowerCamelCase : List[Any] = "inv_freq" elif "running_var" in name: _lowerCamelCase : Tuple = "running_var" elif "num_batches_tracked" in name: _lowerCamelCase : str = "num_batches_tracked" else: _lowerCamelCase : Dict = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : int = full_name.split("conv_layers." )[-1] _lowerCamelCase : List[Any] = name.split("." 
) _lowerCamelCase : Union[str, Any] = int(items[0] ) _lowerCamelCase : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowerCamelCase : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowerCamelCase : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) _lowerCamelCase : Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowerCamelCase : Optional[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCamelCase ) @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Dict: '''simple docstring''' if config_path is not None: _lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" ) else: _lowerCamelCase : Dict = WavaVecaConformerConfig() if "rope" in checkpoint_path: _lowerCamelCase : List[Any] = "rotary" if is_finetuned: if dict_path: _lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowerCamelCase : Optional[int] = target_dict.pad_index _lowerCamelCase : Dict = target_dict.bos_index _lowerCamelCase : Optional[Any] = target_dict.eos_index _lowerCamelCase : str = len(target_dict.symbols ) _lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" ) if not os.path.isdir(_lowerCamelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) _lowerCamelCase : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched _lowerCamelCase : List[str] = 0 _lowerCamelCase : List[Any] = 1 with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , ) _lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False _lowerCamelCase : 
Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) _lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) _lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase ) else: _lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase ) if is_finetuned: _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" ) _lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase ) _lowerCamelCase : Dict = model[0].eval() recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned ) hf_wavavec.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) _lowerCAmelCase : str = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
340
1
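After conversion, inference goes through the usual CTC pipeline. A sketch assuming one of the released rotary-embedding checkpoints; the audio tensor is placeholder input:

import torch
from transformers import Wav2Vec2ConformerForCTC, Wav2Vec2Processor

model_id = "facebook/wav2vec2-conformer-rope-large-960h-ft"
processor = Wav2Vec2Processor.from_pretrained(model_id)
model = Wav2Vec2ConformerForCTC.from_pretrained(model_id)

speech = torch.randn(16000).numpy()  # one second of fake 16 kHz audio
inputs = processor(speech, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(processor.batch_decode(torch.argmax(logits, dim=-1))[0])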
"""simple docstring""" import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Tuple = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } _lowerCAmelCase : List[str] = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } _lowerCAmelCase : Any = {'''facebook/blenderbot_small-90M''': 512} def lowerCamelCase_( _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Optional[int] = set() _lowerCamelCase : List[str] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _lowerCamelCase : List[Any] = char _lowerCamelCase : List[str] = set(_lowerCamelCase ) return pairs class A_ ( _a ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = ['input_ids', 'attention_mask'] def __init__( self: Optional[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: str="__start__" ,__lowerCAmelCase: List[str]="__end__" ,__lowerCAmelCase: List[str]="__unk__" ,__lowerCAmelCase: str="__null__" ,**__lowerCAmelCase: Optional[Any] ,): '''simple docstring''' super().__init__(unk_token=__lowerCAmelCase ,bos_token=__lowerCAmelCase ,eos_token=__lowerCAmelCase ,pad_token=__lowerCAmelCase ,**__lowerCAmelCase ) with open(__lowerCAmelCase ,encoding="utf-8" ) as vocab_handle: _lowerCamelCase : Union[str, Any] = json.load(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()} with open(__lowerCAmelCase ,encoding="utf-8" ) as merges_handle: _lowerCamelCase : str = merges_handle.read().split("\n" )[1:-1] _lowerCamelCase : int = [tuple(merge.split() ) for merge in merges] _lowerCamelCase : List[Any] = dict(zip(__lowerCAmelCase ,range(len(__lowerCAmelCase ) ) ) ) _lowerCamelCase : int = {} @property def _lowercase ( self: List[str] ): '''simple docstring''' return len(self.encoder ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' return dict(self.encoder ,**self.added_tokens_encoder ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: str ): '''simple docstring''' if token in self.cache: return self.cache[token] _lowerCamelCase : Union[str, Any] = re.sub("([.,!?()])" ,r" \1" ,__lowerCAmelCase ) _lowerCamelCase : Dict = re.sub("(')" ,r" \1 " ,__lowerCAmelCase ) _lowerCamelCase : List[Any] = re.sub(r"\s{2,}" ," " ,__lowerCAmelCase ) if "\n" in token: _lowerCamelCase : int = token.replace("\n" ," __newln__" ) _lowerCamelCase : Optional[int] = token.split(" " ) _lowerCamelCase : Union[str, Any] = [] for token in tokens: if not len(__lowerCAmelCase ): continue _lowerCamelCase : Tuple = token.lower() _lowerCamelCase : Optional[Any] = tuple(__lowerCAmelCase ) _lowerCamelCase : Any = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) _lowerCamelCase : Dict = get_pairs(__lowerCAmelCase ) if not pairs: words.append(__lowerCAmelCase ) continue while True: 
_lowerCamelCase : Optional[int] = min(__lowerCAmelCase ,key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase ,float("inf" ) ) ) if bigram not in self.bpe_ranks: break _lowerCamelCase, _lowerCamelCase : Optional[int] = bigram _lowerCamelCase : Tuple = [] _lowerCamelCase : Tuple = 0 while i < len(__lowerCAmelCase ): try: _lowerCamelCase : List[str] = word.index(__lowerCAmelCase ,__lowerCAmelCase ) new_word.extend(word[i:j] ) _lowerCamelCase : int = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _lowerCamelCase : Dict = tuple(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = new_word if len(__lowerCAmelCase ) == 1: break else: _lowerCamelCase : Dict = get_pairs(__lowerCAmelCase ) _lowerCamelCase : Dict = "@@ ".join(__lowerCAmelCase ) _lowerCamelCase : Tuple = word[:-4] _lowerCamelCase : Optional[Any] = word words.append(__lowerCAmelCase ) return " ".join(__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : int = [] _lowerCamelCase : Optional[Any] = re.findall(r"\S+\n?" ,__lowerCAmelCase ) for token in words: split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(" " ) ) ) return split_tokens def _lowercase ( self: Optional[int] ,__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : Any = token.lower() return self.encoder.get(__lowerCAmelCase ,self.encoder.get(self.unk_token ) ) def _lowercase ( self: int ,__lowerCAmelCase: int ): '''simple docstring''' return self.decoder.get(__lowerCAmelCase ,self.unk_token ) def _lowercase ( self: int ,__lowerCAmelCase: List[str] ): '''simple docstring''' _lowerCamelCase : Dict = " ".join(__lowerCAmelCase ).replace("@@ " ,"" ).strip() return out_string def _lowercase ( self: Any ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__lowerCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _lowerCamelCase : Optional[int] = os.path.join( __lowerCAmelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) _lowerCamelCase : List[Any] = os.path.join( __lowerCAmelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCAmelCase ,"w" ,encoding="utf-8" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=__lowerCAmelCase ,ensure_ascii=__lowerCAmelCase ) + "\n" ) _lowerCamelCase : int = 0 with open(__lowerCAmelCase ,"w" ,encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda __lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" ) _lowerCamelCase : Optional[int] = token_index writer.write(" ".join(__lowerCAmelCase ) + "\n" ) index += 1 return vocab_file, merge_file
340
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(a - b ) for a, b in zip(_lowerCamelCase , _lowerCamelCase ) ) ) def lowerCamelCase_( _lowerCamelCase ) -> None: '''simple docstring''' if point: if isinstance(_lowerCamelCase , _lowerCamelCase ): for item in point: if not isinstance(_lowerCamelCase , (int, float) ): _lowerCamelCase : Dict = ( "Expected a list of numbers as input, found " F"""{type(_lowerCamelCase ).__name__}""" ) raise TypeError(_lowerCamelCase ) else: _lowerCamelCase : Optional[int] = F"""Expected a list of numbers as input, found {type(_lowerCamelCase ).__name__}""" raise TypeError(_lowerCamelCase ) else: raise ValueError("Missing an input" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(x - y ) for x, y in zip(_lowerCamelCase , _lowerCamelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
340
1
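The core of the BPE tokenizer above is the merge loop: collect adjacent symbol pairs, pick the highest-ranked learned merge, and fuse it. A self-contained sketch of a single merge step, using a toy merge table:

def get_pairs(word):
    # Set of adjacent symbol pairs in a word represented as a tuple of symbols.
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}


def bpe_merge_once(word, bpe_ranks):
    pairs = get_pairs(word)
    if not pairs:
        return word
    # Lowest rank means the merge was learned earliest, so it applies first.
    bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
    if bigram not in bpe_ranks:
        return word
    first, second = bigram
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
            merged.append(first + second)
            i += 2
        else:
            merged.append(word[i])
            i += 1
    return tuple(merged)


ranks = {("l", "o"): 0, ("lo", "w"): 1}  # toy merge table
print(bpe_merge_once(("l", "o", "w"), ranks))  # ('lo', 'w')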
"""simple docstring""" import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model _lowerCAmelCase : str = '''0.12''' # assumed parallelism: 8 if is_torch_available(): import torch def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ) -> List[Any]: '''simple docstring''' if rng is None: _lowerCamelCase : Union[str, Any] = random.Random() _lowerCamelCase : Union[str, Any] = 1 for dim in shape: total_dims *= dim _lowerCamelCase : Optional[int] = [] for _ in range(_lowerCamelCase ): values.append(rng.randint(0 , vocab_size - 1 ) ) _lowerCamelCase : Union[str, Any] = np.array(_lowerCamelCase , dtype=jnp.intaa ).reshape(_lowerCamelCase ) return output def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : Optional[int] = ids_tensor(_lowerCamelCase , vocab_size=2 , rng=_lowerCamelCase ) # make sure that at least one token is attended to for each batch _lowerCamelCase : List[str] = 1 return attn_mask @require_flax class A_ : lowerCAmelCase__ = None lowerCAmelCase__ = () def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 _lowerCamelCase : List[str] = 2 _lowerCamelCase : str = inputs["input_ids"].shape[-1] // 2 _lowerCamelCase : Tuple = inputs["input_ids"][:max_batch_size, :sequence_length] _lowerCamelCase : Any = jnp.ones_like(__lowerCAmelCase ) _lowerCamelCase : List[Any] = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens _lowerCamelCase : Optional[Any] = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` _lowerCamelCase : List[str] = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = False _lowerCamelCase : Dict = max_length _lowerCamelCase : Tuple = 0 for model_class in self.all_generative_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning _lowerCamelCase : Any = getattr(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Dict = pt_model_class(__lowerCAmelCase ).eval() _lowerCamelCase : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCAmelCase ,flax_model.params ) _lowerCamelCase : int = flax_model.generate(__lowerCAmelCase ).sequences _lowerCamelCase : Optional[int] = pt_model.generate(torch.tensor(__lowerCAmelCase ,dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: _lowerCamelCase : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, 
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = self._get_input_ids_and_config() _lowerCamelCase : Union[str, Any] = False _lowerCamelCase : Union[str, Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = True _lowerCamelCase : Optional[int] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : int = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : int = False _lowerCamelCase : Optional[Any] = max_length _lowerCamelCase : Dict = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[str] = model_class(__lowerCAmelCase ) _lowerCamelCase : Dict = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = self._get_input_ids_and_config() _lowerCamelCase : Tuple = False _lowerCamelCase : Union[str, Any] = max_length _lowerCamelCase : List[str] = 2 _lowerCamelCase : Optional[int] = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : str = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() _lowerCamelCase : int = True _lowerCamelCase : List[Any] = max_length _lowerCamelCase : Optional[Any] = 0.8 _lowerCamelCase : Union[str, Any] = 10 _lowerCamelCase : List[str] = 0.3 _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : str = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Any = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : int = jit(model.generate ) _lowerCamelCase : Optional[int] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() 
,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[str] = max_length _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : Dict = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Any = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : Any = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() _lowerCamelCase : Dict = max_length _lowerCamelCase : List[Any] = 2 _lowerCamelCase : Tuple = 1 _lowerCamelCase : List[str] = 8 _lowerCamelCase : List[Any] = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : int = model_class(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : Optional[Any] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Tuple = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : Dict = False _lowerCamelCase : Any = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[Any] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : List[str] = 
attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : int = 2 _lowerCamelCase : int = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : Dict = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) @require_flax class A_ ( unittest.TestCase ): def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" ) _lowerCamelCase : Union[str, Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) _lowerCamelCase : Optional[Any] = "Hello world" _lowerCamelCase : str = tokenizer(__lowerCAmelCase ,return_tensors="np" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__lowerCAmelCase ,"do_samples" ): model.generate(__lowerCAmelCase ,do_samples=__lowerCAmelCase ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__lowerCAmelCase ,"foo" ): _lowerCamelCase : List[str] = {"foo": "bar"} model.generate(__lowerCAmelCase ,**__lowerCAmelCase )
340
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Optional[Any] = np.inf def set_batch_size(_lowerCamelCase ) -> None: nonlocal batch_size if isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Optional[int] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ) and feature.dtype == "binary": _lowerCamelCase : List[str] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_lowerCamelCase , _lowerCamelCase ) return None if batch_size is np.inf else batch_size class A_ ( _a ): def __init__( self: Optional[int] ,__lowerCAmelCase: NestedDataStructureLike[PathLike] ,__lowerCAmelCase: Optional[NamedSplit] = None ,__lowerCAmelCase: Optional[Features] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: int ,): '''simple docstring''' super().__init__( __lowerCAmelCase ,split=__lowerCAmelCase ,features=__lowerCAmelCase ,cache_dir=__lowerCAmelCase ,keep_in_memory=__lowerCAmelCase ,streaming=__lowerCAmelCase ,num_proc=__lowerCAmelCase ,**__lowerCAmelCase ,) _lowerCamelCase : Tuple = path_or_paths if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else {self.split: path_or_paths} _lowerCamelCase : Any = _PACKAGED_DATASETS_MODULES["parquet"][1] _lowerCamelCase : int = Parquet( cache_dir=__lowerCAmelCase ,data_files=__lowerCAmelCase ,features=__lowerCAmelCase ,hash=__lowerCAmelCase ,**__lowerCAmelCase ,) def _lowercase ( self: Optional[int] ): '''simple docstring''' if self.streaming: _lowerCamelCase : List[Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _lowerCamelCase : Tuple = None _lowerCamelCase : Optional[int] = None _lowerCamelCase : List[str] = None _lowerCamelCase : str = None self.builder.download_and_prepare( download_config=__lowerCAmelCase ,download_mode=__lowerCAmelCase ,verification_mode=__lowerCAmelCase ,base_path=__lowerCAmelCase ,num_proc=self.num_proc ,) _lowerCamelCase : Any = self.builder.as_dataset( split=self.split ,verification_mode=__lowerCAmelCase ,in_memory=self.keep_in_memory ) return dataset class A_ : def __init__( self: str ,__lowerCAmelCase: Dataset ,__lowerCAmelCase: Union[PathLike, BinaryIO] ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: List[Any] ,): '''simple docstring''' _lowerCamelCase : Any = dataset _lowerCamelCase : Any = path_or_buf _lowerCamelCase : Any = batch_size or get_writer_batch_size(dataset.features ) _lowerCamelCase : List[str] = parquet_writer_kwargs def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf ,(str, 
bytes, os.PathLike) ): with open(self.path_or_buf ,"wb+" ) as buffer: _lowerCamelCase : str = self._write(file_obj=__lowerCAmelCase ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) else: _lowerCamelCase : Optional[int] = self._write(file_obj=self.path_or_buf ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) return written def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: BinaryIO ,__lowerCAmelCase: int ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : List[str] = 0 _lowerCamelCase : Optional[int] = parquet_writer_kwargs.pop("path_or_buf" ,__lowerCAmelCase ) _lowerCamelCase : List[str] = self.dataset.features.arrow_schema _lowerCamelCase : str = pq.ParquetWriter(__lowerCAmelCase ,schema=__lowerCAmelCase ,**__lowerCAmelCase ) for offset in logging.tqdm( range(0 ,len(self.dataset ) ,__lowerCAmelCase ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating parquet from Arrow format" ,): _lowerCamelCase : List[str] = query_table( table=self.dataset._data ,key=slice(__lowerCAmelCase ,offset + batch_size ) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,) writer.write_table(__lowerCAmelCase ) written += batch.nbytes writer.close() return written
340
1
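The parquet writer above streams a dataset to disk one row-group-sized batch at a time. A minimal sketch of that batched ParquetWriter pattern, assuming pyarrow is installed ("example.parquet" is an arbitrary path):

import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"id": list(range(10)), "text": [f"row {i}" for i in range(10)]})
batch_size = 4

writer = pq.ParquetWriter("example.parquet", schema=table.schema)
for offset in range(0, table.num_rows, batch_size):
    # Slice a row-group-sized chunk and append it to the file.
    writer.write_table(table.slice(offset, batch_size))
writer.close()

print(pq.read_table("example.parquet").num_rows)  # 10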
"""simple docstring""" import os import string import sys _lowerCAmelCase : Dict = 1 << 8 _lowerCAmelCase : Union[str, Any] = { '''tab''': ord('''\t'''), '''newline''': ord('''\r'''), '''esc''': 27, '''up''': 65 + ARROW_KEY_FLAG, '''down''': 66 + ARROW_KEY_FLAG, '''right''': 67 + ARROW_KEY_FLAG, '''left''': 68 + ARROW_KEY_FLAG, '''mod_int''': 91, '''undefined''': sys.maxsize, '''interrupt''': 3, '''insert''': 50, '''delete''': 51, '''pg_up''': 53, '''pg_down''': 54, } _lowerCAmelCase : Optional[Any] = KEYMAP['''up'''] _lowerCAmelCase : Optional[Any] = KEYMAP['''left'''] if sys.platform == "win32": _lowerCAmelCase : int = [] _lowerCAmelCase : Optional[Any] = { B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG, B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG, B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG, B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG, B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG, B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG, B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG, B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG, } for i in range(10): _lowerCAmelCase : Tuple = ord(str(i)) def lowerCamelCase_( ) -> Any: '''simple docstring''' if os.name == "nt": import msvcrt _lowerCamelCase : Dict = "mbcs" # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(_lowerCamelCase ) == 0: # Read the keystroke _lowerCamelCase : Dict = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): _lowerCamelCase : int = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: _lowerCamelCase : Optional[Any] = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"] ) ) WIN_CH_BUFFER.append(_lowerCamelCase ) if ord(_lowerCamelCase ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) _lowerCamelCase : Tuple = chr(KEYMAP["esc"] ) except KeyError: _lowerCamelCase : str = cha[1] else: _lowerCamelCase : int = ch.decode(_lowerCamelCase ) else: _lowerCamelCase : List[Any] = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty _lowerCamelCase : Optional[int] = sys.stdin.fileno() _lowerCamelCase : Tuple = termios.tcgetattr(_lowerCamelCase ) try: tty.setraw(_lowerCamelCase ) _lowerCamelCase : Dict = sys.stdin.read(1 ) finally: termios.tcsetattr(_lowerCamelCase , termios.TCSADRAIN , _lowerCamelCase ) return ch def lowerCamelCase_( ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Tuple = get_raw_chars() if ord(_lowerCamelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(_lowerCamelCase ) == KEYMAP["esc"]: _lowerCamelCase : Optional[Any] = get_raw_chars() if ord(_lowerCamelCase ) == KEYMAP["mod_int"]: _lowerCamelCase : Tuple = get_raw_chars() if ord(_lowerCamelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_lowerCamelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(_lowerCamelCase ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
340
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Optional[int] = {} _lowerCamelCase : Optional[int] = tokenizer(example["content"] , truncation=_lowerCamelCase )["input_ids"] _lowerCamelCase : Dict = len(example["content"] ) / len(output["input_ids"] ) return output _lowerCAmelCase : Tuple = HfArgumentParser(PretokenizationArguments) _lowerCAmelCase : Optional[int] = parser.parse_args() if args.num_workers is None: _lowerCAmelCase : Any = multiprocessing.cpu_count() _lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir) _lowerCAmelCase : Union[str, Any] = time.time() _lowerCAmelCase : Optional[int] = load_dataset(args.dataset_name, split='''train''') print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') _lowerCAmelCase : Any = time.time() _lowerCAmelCase : Dict = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ '''repo_name''', '''path''', '''copies''', '''size''', '''content''', '''license''', '''hash''', '''line_mean''', '''line_max''', '''alpha_frac''', '''autogenerated''', ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') _lowerCAmelCase : str = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
340
1
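The POSIX branch of the raw-character reader above boils down to a few termios/tty calls. A minimal sketch that restores terminal settings even if the read fails (requires an interactive terminal):

import sys
import termios
import tty


def read_one_char() -> str:
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)  # disable line buffering and echo
        ch = sys.stdin.read(1)
    finally:
        # Always restore the original terminal settings.
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


if __name__ == "__main__":
    print(f"You pressed: {read_one_char()!r}")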
"""simple docstring""" from ....utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) class A_ ( _a ): def __init__( self: Union[str, Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: Optional[Any]=2_048 ): '''simple docstring''' _lowerCamelCase : str = config.__dict__ _lowerCamelCase : Any = modal_hidden_size if num_labels: _lowerCamelCase : Optional[int] = num_labels
340
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Optional[Any] = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
1
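The _LazyModule machinery above defers heavy imports until a name is first accessed. A lighter-weight sketch of the same idea using PEP 562 module-level __getattr__ ("submodule" and its names are hypothetical; this belongs in a package __init__.py):

import importlib

_import_structure = {"submodule": ["SomeClass", "some_function"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # Import the owning submodule only when one of its names is first accessed.
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)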
"""simple docstring""" from __future__ import annotations import bisect def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0 , _lowerCamelCase = -1 ) -> int: '''simple docstring''' if hi < 0: _lowerCamelCase : Dict = len(_lowerCamelCase ) while lo < hi: _lowerCamelCase : List[str] = lo + (hi - lo) // 2 if sorted_collection[mid] < item: _lowerCamelCase : Union[str, Any] = mid + 1 else: _lowerCamelCase : int = mid return lo def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0 , _lowerCamelCase = -1 ) -> int: '''simple docstring''' if hi < 0: _lowerCamelCase : Union[str, Any] = len(_lowerCamelCase ) while lo < hi: _lowerCamelCase : int = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: _lowerCamelCase : Union[str, Any] = mid + 1 else: _lowerCamelCase : Optional[int] = mid return lo def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0 , _lowerCamelCase = -1 ) -> None: '''simple docstring''' sorted_collection.insert(bisect_left(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0 , _lowerCamelCase = -1 ) -> None: '''simple docstring''' sorted_collection.insert(bisect_right(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int | None: '''simple docstring''' _lowerCamelCase : Tuple = 0 _lowerCamelCase : Tuple = len(_lowerCamelCase ) - 1 while left <= right: _lowerCamelCase : Tuple = left + (right - left) // 2 _lowerCamelCase : Union[str, Any] = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: _lowerCamelCase : List[str] = midpoint - 1 else: _lowerCamelCase : Any = midpoint + 1 return None def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int | None: '''simple docstring''' _lowerCamelCase : Tuple = bisect.bisect_left(_lowerCamelCase , _lowerCamelCase ) if index != len(_lowerCamelCase ) and sorted_collection[index] == item: return index return None def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int | None: '''simple docstring''' if right < left: return None _lowerCamelCase : int = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , midpoint - 1 ) else: return binary_search_by_recursion(_lowerCamelCase , _lowerCamelCase , midpoint + 1 , _lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : int = input('''Enter numbers separated by comma:\n''').strip() _lowerCAmelCase : Any = sorted(int(item) for item in user_input.split(''',''')) _lowerCAmelCase : Optional[Any] = int(input('''Enter a single number to be found in the list:\n''')) _lowerCAmelCase : Optional[Any] = binary_search(collection, target) if result is None: print(f'''{target} was not found in {collection}.''') else: print(f'''{target} was found at position {result} in {collection}.''')
340
"""simple docstring""" import logging from transformers.configuration_utils import PretrainedConfig _lowerCAmelCase : Optional[Any] = logging.getLogger(__name__) class A_ ( _a ): lowerCAmelCase__ = 'masked_bert' def __init__( self: Union[str, Any] ,__lowerCAmelCase: Dict=30_522 ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Dict=12 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: List[Any]="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Tuple=512 ,__lowerCAmelCase: str=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Union[str, Any]=1e-12 ,__lowerCAmelCase: Union[str, Any]=0 ,__lowerCAmelCase: List[Any]="topK" ,__lowerCAmelCase: Optional[Any]="constant" ,__lowerCAmelCase: Optional[Any]=0.0 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = hidden_size _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : Tuple = num_attention_heads _lowerCamelCase : Optional[Any] = hidden_act _lowerCamelCase : Optional[Any] = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCamelCase : str = max_position_embeddings _lowerCamelCase : List[str] = type_vocab_size _lowerCamelCase : Optional[int] = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps _lowerCamelCase : int = pruning_method _lowerCamelCase : str = mask_init _lowerCamelCase : List[Any] = mask_scale
340
1
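The hand-rolled lower-bound search above should agree with the standard library on every input. A quick randomized consistency check against bisect.bisect_left:

import bisect
import random


def lower_bound(items, target):
    # Same loop as bisect_left above: first index whose element is >= target.
    lo, hi = 0, len(items)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if items[mid] < target:
            lo = mid + 1
        else:
            hi = mid
    return lo


data = sorted(random.randrange(100) for _ in range(50))
assert all(lower_bound(data, t) == bisect.bisect_left(data, t) for t in range(101))
print("lower_bound matches bisect.bisect_left")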
"""simple docstring""" import argparse import os import re _lowerCAmelCase : int = '''src/diffusers''' # Pattern that looks at the indentation in a line. _lowerCAmelCase : Optional[int] = re.compile(R'''^(\s*)\S''') # Pattern that matches `"key":" and puts `key` in group 0. _lowerCAmelCase : Any = re.compile(R'''^\s*"([^"]+)":''') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. _lowerCAmelCase : str = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''') # Pattern that matches `"key",` and puts `key` in group 0. _lowerCAmelCase : str = re.compile(R'''^\s*"([^"]+)",\s*$''') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. _lowerCAmelCase : Dict = re.compile(R'''\[([^\]]+)\]''') def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : Tuple = _re_indent.search(_lowerCamelCase ) return "" if search is None else search.groups()[0] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase="" , _lowerCamelCase=None , _lowerCamelCase=None ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Tuple = 0 _lowerCamelCase : List[Any] = code.split("\n" ) if start_prompt is not None: while not lines[index].startswith(_lowerCamelCase ): index += 1 _lowerCamelCase : Union[str, Any] = ["\n".join(lines[:index] )] else: _lowerCamelCase : Optional[Any] = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). _lowerCamelCase : int = [lines[index]] index += 1 while index < len(_lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ): current_block.append(lines[index] ) blocks.append("\n".join(_lowerCamelCase ) ) if index < len(_lowerCamelCase ) - 1: _lowerCamelCase : str = [lines[index + 1]] index += 1 else: _lowerCamelCase : Dict = [] else: blocks.append("\n".join(_lowerCamelCase ) ) _lowerCamelCase : Tuple = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_lowerCamelCase ) > 0: blocks.append("\n".join(_lowerCamelCase ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_lowerCamelCase ): blocks.append("\n".join(lines[index:] ) ) return blocks def lowerCamelCase_( _lowerCamelCase ) -> Dict: '''simple docstring''' def _inner(_lowerCamelCase ): return key(_lowerCamelCase ).lower().replace("_" , "" ) return _inner def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None ) -> List[str]: '''simple docstring''' def noop(_lowerCamelCase ): return x if key is None: _lowerCamelCase : Tuple = noop # Constants are all uppercase, they go first. _lowerCamelCase : Union[str, Any] = [obj for obj in objects if key(_lowerCamelCase ).isupper()] # Classes are not all uppercase but start with a capital, they go second. _lowerCamelCase : Optional[int] = [obj for obj in objects if key(_lowerCamelCase )[0].isupper() and not key(_lowerCamelCase ).isupper()] # Functions begin with a lowercase, they go last. 
_lowerCamelCase : Any = [obj for obj in objects if not key(_lowerCamelCase )[0].isupper()] _lowerCamelCase : Tuple = ignore_underscore(_lowerCamelCase ) return sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' def _replace(_lowerCamelCase ): _lowerCamelCase : Any = match.groups()[0] if "," not in imports: return F"""[{imports}]""" _lowerCamelCase : Optional[Any] = [part.strip().replace("\"" , "" ) for part in imports.split("," )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: _lowerCamelCase : List[str] = keys[:-1] return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(_lowerCamelCase )] ) + "]" _lowerCamelCase : int = import_statement.split("\n" ) if len(_lowerCamelCase ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. _lowerCamelCase : int = 2 if lines[1].strip() == "[" else 1 _lowerCamelCase : Any = [(i, _re_strip_line.search(_lowerCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] _lowerCamelCase : Union[str, Any] = sort_objects(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] ) _lowerCamelCase : Dict = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_lowerCamelCase ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: _lowerCamelCase : List[str] = _re_bracket_content.sub(_replace , lines[1] ) else: _lowerCamelCase : Optional[Any] = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: _lowerCamelCase : Optional[Any] = keys[:-1] _lowerCamelCase : Optional[Any] = get_indent(lines[1] ) + ", ".join([F"""\"{k}\"""" for k in sort_objects(_lowerCamelCase )] ) return "\n".join(_lowerCamelCase ) else: # Finally we have to deal with imports fitting on one line _lowerCamelCase : Any = _re_bracket_content.sub(_replace , _lowerCamelCase ) return import_statement def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=True ) -> int: '''simple docstring''' with open(_lowerCamelCase , "r" ) as f: _lowerCamelCase : int = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 _lowerCamelCase : Optional[Any] = split_code_in_indented_blocks( _lowerCamelCase , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_lowerCamelCase ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. _lowerCamelCase : str = main_blocks[block_idx] _lowerCamelCase : Optional[Any] = block.split("\n" ) # Get to the start of the imports. _lowerCamelCase : Any = 0 while line_idx < len(_lowerCamelCase ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: _lowerCamelCase : Dict = len(_lowerCamelCase ) else: line_idx += 1 if line_idx >= len(_lowerCamelCase ): continue # Ignore beginning and last line: they don't contain anything. 
_lowerCamelCase : Optional[Any] = "\n".join(block_lines[line_idx:-1] ) _lowerCamelCase : str = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. _lowerCamelCase : Union[str, Any] = split_code_in_indented_blocks(_lowerCamelCase , indent_level=_lowerCamelCase ) # We have two categories of import key: list or _import_structure[key].append/extend _lowerCamelCase : Optional[Any] = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. _lowerCamelCase : Dict = [(pattern.search(_lowerCamelCase ).groups()[0] if pattern.search(_lowerCamelCase ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. _lowerCamelCase : Any = [(i, key) for i, key in enumerate(_lowerCamelCase ) if key is not None] _lowerCamelCase : Tuple = [x[0] for x in sorted(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. _lowerCamelCase : Any = 0 _lowerCamelCase : str = [] for i in range(len(_lowerCamelCase ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: _lowerCamelCase : Optional[int] = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(_lowerCamelCase ) count += 1 # And we put our main block back together with its first and last line. _lowerCamelCase : str = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(_lowerCamelCase ): if check_only: return True else: print(F"""Overwriting {file}.""" ) with open(_lowerCamelCase , "w" ) as f: f.write("\n".join(_lowerCamelCase ) ) def lowerCamelCase_( _lowerCamelCase=True ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : int = [] for root, _, files in os.walk(_lowerCamelCase ): if "__init__.py" in files: _lowerCamelCase : Tuple = sort_imports(os.path.join(_lowerCamelCase , "__init__.py" ) , check_only=_lowerCamelCase ) if result: _lowerCamelCase : int = [os.path.join(_lowerCamelCase , "__init__.py" )] if len(_lowerCamelCase ) > 0: raise ValueError(F"""Would overwrite {len(_lowerCamelCase )} files, run `make style`.""" ) if __name__ == "__main__": _lowerCAmelCase : int = argparse.ArgumentParser() parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''') _lowerCAmelCase : Dict = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
340
"""simple docstring""" import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model _lowerCAmelCase : str = '''0.12''' # assumed parallelism: 8 if is_torch_available(): import torch def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ) -> List[Any]: '''simple docstring''' if rng is None: _lowerCamelCase : Union[str, Any] = random.Random() _lowerCamelCase : Union[str, Any] = 1 for dim in shape: total_dims *= dim _lowerCamelCase : Optional[int] = [] for _ in range(_lowerCamelCase ): values.append(rng.randint(0 , vocab_size - 1 ) ) _lowerCamelCase : Union[str, Any] = np.array(_lowerCamelCase , dtype=jnp.intaa ).reshape(_lowerCamelCase ) return output def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : Optional[int] = ids_tensor(_lowerCamelCase , vocab_size=2 , rng=_lowerCamelCase ) # make sure that at least one token is attended to for each batch _lowerCamelCase : List[str] = 1 return attn_mask @require_flax class A_ : lowerCAmelCase__ = None lowerCAmelCase__ = () def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 _lowerCamelCase : List[str] = 2 _lowerCamelCase : str = inputs["input_ids"].shape[-1] // 2 _lowerCamelCase : Tuple = inputs["input_ids"][:max_batch_size, :sequence_length] _lowerCamelCase : Any = jnp.ones_like(__lowerCAmelCase ) _lowerCamelCase : List[Any] = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens _lowerCamelCase : Optional[Any] = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` _lowerCamelCase : List[str] = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = False _lowerCamelCase : Dict = max_length _lowerCamelCase : Tuple = 0 for model_class in self.all_generative_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning _lowerCamelCase : Any = getattr(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Dict = pt_model_class(__lowerCAmelCase ).eval() _lowerCamelCase : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCAmelCase ,flax_model.params ) _lowerCamelCase : int = flax_model.generate(__lowerCAmelCase ).sequences _lowerCamelCase : Optional[int] = pt_model.generate(torch.tensor(__lowerCAmelCase ,dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: _lowerCamelCase : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, 
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = self._get_input_ids_and_config() _lowerCamelCase : Union[str, Any] = False _lowerCamelCase : Union[str, Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = True _lowerCamelCase : Optional[int] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : int = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : int = False _lowerCamelCase : Optional[Any] = max_length _lowerCamelCase : Dict = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[str] = model_class(__lowerCAmelCase ) _lowerCamelCase : Dict = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = self._get_input_ids_and_config() _lowerCamelCase : Tuple = False _lowerCamelCase : Union[str, Any] = max_length _lowerCamelCase : List[str] = 2 _lowerCamelCase : Optional[int] = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : str = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() _lowerCamelCase : int = True _lowerCamelCase : List[Any] = max_length _lowerCamelCase : Optional[Any] = 0.8 _lowerCamelCase : Union[str, Any] = 10 _lowerCamelCase : List[str] = 0.3 _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : str = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Any = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : int = jit(model.generate ) _lowerCamelCase : Optional[int] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() 
,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[str] = max_length _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : Dict = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Any = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : Any = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() _lowerCamelCase : Dict = max_length _lowerCamelCase : List[Any] = 2 _lowerCamelCase : Tuple = 1 _lowerCamelCase : List[str] = 8 _lowerCamelCase : List[Any] = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : int = model_class(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : Optional[Any] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Tuple = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : Dict = False _lowerCamelCase : Any = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[Any] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : List[str] = 
attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : int = 2 _lowerCamelCase : int = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : Dict = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) @require_flax class A_ ( unittest.TestCase ): def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" ) _lowerCamelCase : Union[str, Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) _lowerCamelCase : Optional[Any] = "Hello world" _lowerCamelCase : str = tokenizer(__lowerCAmelCase ,return_tensors="np" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__lowerCAmelCase ,"do_samples" ): model.generate(__lowerCAmelCase ,do_samples=__lowerCAmelCase ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__lowerCAmelCase ,"foo" ): _lowerCamelCase : List[str] = {"foo": "bar"} model.generate(__lowerCAmelCase ,**__lowerCAmelCase )
340
1
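The import sorter above orders names in three buckets: constants first, then classes, then functions, each compared case- and underscore-insensitively. A small standalone sketch of that ordering:

def sort_key(name: str) -> str:
    # Compare names ignoring case and underscores.
    return name.lower().replace("_", "")


def sort_objects(objects: list) -> list:
    constants = [o for o in objects if o.isupper()]
    classes = [o for o in objects if o[0].isupper() and not o.isupper()]
    functions = [o for o in objects if not o[0].isupper()]
    return sorted(constants, key=sort_key) + sorted(classes, key=sort_key) + sorted(functions, key=sort_key)


names = ["my_func", "MyClass", "MY_CONSTANT", "other_func", "OtherClass"]
print(sort_objects(names))
# ['MY_CONSTANT', 'MyClass', 'OtherClass', 'my_func', 'other_func']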
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' return "".join(chr(ord(_lowerCamelCase ) - 32 ) if "a" <= char <= "z" else char for char in word ) if __name__ == "__main__": from doctest import testmod testmod()
340
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : int = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class A_ ( _a ): lowerCAmelCase__ = 'mobilenet_v1' def __init__( self: Tuple ,__lowerCAmelCase: int=3 ,__lowerCAmelCase: Dict=224 ,__lowerCAmelCase: int=1.0 ,__lowerCAmelCase: Tuple=8 ,__lowerCAmelCase: List[str]="relu6" ,__lowerCAmelCase: int=True ,__lowerCAmelCase: List[Any]=0.9_99 ,__lowerCAmelCase: Optional[int]=0.02 ,__lowerCAmelCase: Optional[int]=0.0_01 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(**__lowerCAmelCase ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) _lowerCamelCase : List[str] = num_channels _lowerCamelCase : Union[str, Any] = image_size _lowerCamelCase : List[Any] = depth_multiplier _lowerCamelCase : Any = min_depth _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Dict = tf_padding _lowerCamelCase : Union[str, Any] = classifier_dropout_prob _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps class A_ ( _a ): lowerCAmelCase__ = version.parse('1.11' ) @property def _lowercase ( self: Optional[int] ): '''simple docstring''' return OrderedDict([("pixel_values", {0: "batch"})] ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def _lowercase ( self: Any ): '''simple docstring''' return 1e-4
340
1
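The uppercase conversion above relies on lowercase ASCII letters sitting exactly 32 code points after their uppercase counterparts; the same offset works in reverse for a lower() clone:

def lower(word: str) -> str:
    # Shift uppercase ASCII letters down by 32 code points; leave the rest alone.
    return "".join(chr(ord(char) + 32) if "A" <= char <= "Z" else char for char in word)


assert lower("Hello World!") == "hello world!"
print(lower("MiXeD CaSe 123"))  # mixed case 123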
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class A_ ( unittest.TestCase ): def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : List[str] = tempfile.mkdtemp() # fmt: off _lowerCamelCase : List[str] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on _lowerCamelCase : Any = dict(zip(__lowerCAmelCase ,range(len(__lowerCAmelCase ) ) ) ) _lowerCamelCase : int = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] _lowerCamelCase : Tuple = {"unk_token": "<unk>"} _lowerCamelCase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] ) _lowerCamelCase : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCAmelCase ) + "\n" ) with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp: fp.write("\n".join(__lowerCAmelCase ) ) _lowerCamelCase : str = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], "image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } _lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname ,__lowerCAmelCase ) with open(self.image_processor_file ,"w" ,encoding="utf-8" ) as fp: json.dump(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: Dict ,**__lowerCAmelCase: str ): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__lowerCAmelCase ) def _lowercase ( self: Tuple ,**__lowerCAmelCase: int ): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__lowerCAmelCase ) def _lowercase ( self: Optional[int] ,**__lowerCAmelCase: List[Any] ): '''simple docstring''' return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**__lowerCAmelCase ) def _lowercase ( self: str ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Tuple = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] _lowerCamelCase : Tuple = [Image.fromarray(np.moveaxis(__lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : str = self.get_tokenizer() _lowerCamelCase : str = self.get_rust_tokenizer() _lowerCamelCase : Any = self.get_image_processor() _lowerCamelCase : Optional[int] = CLIPProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) _lowerCamelCase : Optional[Any] = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=__lowerCAmelCase ) _lowerCamelCase : str = CLIPProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) _lowerCamelCase : Union[str, Any] = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() 
,tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,__lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer ,__lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,__lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor ,__lowerCAmelCase ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : Tuple = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _lowerCamelCase : List[str] = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" ) _lowerCamelCase : List[str] = self.get_image_processor(do_normalize=__lowerCAmelCase ,padding_value=1.0 ) _lowerCamelCase : int = CLIPProcessor.from_pretrained( self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=__lowerCAmelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__lowerCAmelCase ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : Tuple = self.get_image_processor() _lowerCamelCase : Dict = self.get_tokenizer() _lowerCamelCase : Optional[int] = CLIPProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) _lowerCamelCase : Tuple = self.prepare_image_inputs() _lowerCamelCase : Optional[int] = image_processor(__lowerCAmelCase ,return_tensors="np" ) _lowerCamelCase : str = processor(images=__lowerCAmelCase ,return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : Any = self.get_tokenizer() _lowerCamelCase : Tuple = CLIPProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) _lowerCamelCase : str = "lower newer" _lowerCamelCase : Any = processor(text=__lowerCAmelCase ) _lowerCamelCase : Dict = tokenizer(__lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : Dict = self.get_tokenizer() _lowerCamelCase : Optional[int] = CLIPProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) _lowerCamelCase : List[Any] = "lower newer" _lowerCamelCase : List[str] = self.prepare_image_inputs() _lowerCamelCase : List[Any] = processor(text=__lowerCAmelCase ,images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : List[str] = self.get_image_processor() _lowerCamelCase : Optional[int] = self.get_tokenizer() _lowerCamelCase : 
str = CLIPProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) _lowerCamelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowerCamelCase : int = processor.batch_decode(__lowerCAmelCase ) _lowerCamelCase : Any = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Tuple = self.get_image_processor() _lowerCamelCase : Optional[int] = self.get_tokenizer() _lowerCamelCase : List[str] = CLIPProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = "lower newer" _lowerCamelCase : Optional[int] = self.prepare_image_inputs() _lowerCamelCase : Union[str, Any] = processor(text=__lowerCAmelCase ,images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
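For orientation, a minimal sketch of the same processor used outside the test harness; the checkpoint name is only illustrative (any CLIP checkpoint works) and downloading it requires network access.

import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a photo"], images=[image], return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']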
340
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def lowerCamelCase_( _lowerCamelCase ) -> Any: '''simple docstring''' for param in module.parameters(): _lowerCamelCase : Optional[int] = False def lowerCamelCase_( ) -> List[str]: '''simple docstring''' _lowerCamelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _lowerCamelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Dict = plt.imshow(_lowerCamelCase ) fig.axes.get_xaxis().set_visible(_lowerCamelCase ) fig.axes.get_yaxis().set_visible(_lowerCamelCase ) plt.show() def lowerCamelCase_( ) -> str: '''simple docstring''' _lowerCamelCase : Tuple = datetime.now() _lowerCamelCase : Tuple = current_time.strftime("%H:%M:%S" ) return timestamp
340
1
"""simple docstring""" from __future__ import annotations from random import random from typing import Generic, TypeVar _lowerCAmelCase : Union[str, Any] = TypeVar('''KT''') _lowerCAmelCase : Union[str, Any] = TypeVar('''VT''') class A_ ( Generic[KT, VT] ): def __init__( self: int ,__lowerCAmelCase: KT | str = "root" ,__lowerCAmelCase: VT | None = None ): '''simple docstring''' _lowerCamelCase : Optional[int] = key _lowerCamelCase : int = value _lowerCamelCase : list[Node[KT, VT]] = [] def __repr__( self: Dict ): '''simple docstring''' return F"""Node({self.key}: {self.value})""" @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' return len(self.forward ) class A_ ( Generic[KT, VT] ): def __init__( self: str ,__lowerCAmelCase: float = 0.5 ,__lowerCAmelCase: int = 16 ): '''simple docstring''' _lowerCamelCase : Node[KT, VT] = Node[KT, VT]() _lowerCamelCase : List[str] = 0 _lowerCamelCase : Tuple = p _lowerCamelCase : Optional[Any] = max_level def __str__( self: str ): '''simple docstring''' _lowerCamelCase : Tuple = list(self ) if len(__lowerCAmelCase ) == 0: return F"""SkipList(level={self.level})""" _lowerCamelCase : Optional[int] = max((len(str(__lowerCAmelCase ) ) for item in items) ,default=4 ) _lowerCamelCase : Any = max(__lowerCAmelCase ,4 ) + 4 _lowerCamelCase : int = self.head _lowerCamelCase : Any = [] _lowerCamelCase : str = node.forward.copy() lines.append(F"""[{node.key}]""".ljust(__lowerCAmelCase ,"-" ) + "* " * len(__lowerCAmelCase ) ) lines.append(" " * label_size + "| " * len(__lowerCAmelCase ) ) while len(node.forward ) != 0: _lowerCamelCase : List[Any] = node.forward[0] lines.append( F"""[{node.key}]""".ljust(__lowerCAmelCase ,"-" ) + " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) ) lines.append(" " * label_size + "| " * len(__lowerCAmelCase ) ) _lowerCamelCase : int = node.forward lines.append("None".ljust(__lowerCAmelCase ) + "* " * len(__lowerCAmelCase ) ) return F"""SkipList(level={self.level})\n""" + "\n".join(__lowerCAmelCase ) def __iter__( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : List[str] = self.head while len(node.forward ) != 0: yield node.forward[0].key _lowerCamelCase : Optional[int] = node.forward[0] def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = 1 while random() < self.p and level < self.max_level: level += 1 return level def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase : List[str] = [] _lowerCamelCase : int = self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: _lowerCamelCase : Optional[int] = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(__lowerCAmelCase ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. 
if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: KT ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : List[str] = self._locate_node(__lowerCAmelCase ) if node is not None: for i, update_node in enumerate(__lowerCAmelCase ): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: _lowerCamelCase : int = node.forward[i] else: _lowerCamelCase : Optional[int] = update_node.forward[:i] def _lowercase ( self: Tuple ,__lowerCAmelCase: KT ,__lowerCAmelCase: VT ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : int = self._locate_node(__lowerCAmelCase ) if node is not None: _lowerCamelCase : int = value else: _lowerCamelCase : Optional[int] = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 ,__lowerCAmelCase ): update_vector.append(self.head ) _lowerCamelCase : Any = level _lowerCamelCase : List[str] = Node(__lowerCAmelCase ,__lowerCAmelCase ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(__lowerCAmelCase ) else: _lowerCamelCase : Optional[int] = new_node def _lowercase ( self: Tuple ,__lowerCAmelCase: VT ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._locate_node(__lowerCAmelCase ) if node is not None: return node.value return None def lowerCamelCase_( ) -> int: '''simple docstring''' _lowerCamelCase : Union[str, Any] = SkipList() skip_list.insert("Key1" , 3 ) skip_list.insert("Key2" , 12 ) skip_list.insert("Key3" , 41 ) skip_list.insert("Key4" , -19 ) _lowerCamelCase : Union[str, Any] = skip_list.head _lowerCamelCase : List[Any] = {} while node.level != 0: _lowerCamelCase : Tuple = node.forward[0] _lowerCamelCase : Optional[Any] = node.value assert len(_lowerCamelCase ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def lowerCamelCase_( ) -> List[str]: '''simple docstring''' _lowerCamelCase : Optional[int] = SkipList() skip_list.insert("Key1" , 10 ) skip_list.insert("Key1" , 12 ) skip_list.insert("Key5" , 7 ) skip_list.insert("Key7" , 10 ) skip_list.insert("Key10" , 5 ) skip_list.insert("Key7" , 7 ) skip_list.insert("Key5" , 5 ) skip_list.insert("Key10" , 10 ) _lowerCamelCase : List[str] = skip_list.head _lowerCamelCase : Dict = {} while node.level != 0: _lowerCamelCase : Union[str, Any] = node.forward[0] _lowerCamelCase : Tuple = node.value if len(_lowerCamelCase ) != 4: print() assert len(_lowerCamelCase ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def lowerCamelCase_( ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Dict = SkipList() assert skip_list.find("Some key" ) is None def lowerCamelCase_( ) -> str: '''simple docstring''' _lowerCamelCase : List[str] = SkipList() skip_list.insert("Key2" , 20 ) assert skip_list.find("Key2" ) == 20 skip_list.insert("Some Key" , 10 ) skip_list.insert("Key2" , 8 ) skip_list.insert("V" , 13 ) assert skip_list.find("Y" ) is None assert skip_list.find("Key2" ) == 8 assert skip_list.find("Some Key" ) == 10 assert skip_list.find("V" ) == 13 
def lowerCamelCase_( ) -> List[str]: '''simple docstring''' _lowerCamelCase : Union[str, Any] = SkipList() skip_list.delete("Some key" ) assert len(skip_list.head.forward ) == 0 def lowerCamelCase_( ) -> List[str]: '''simple docstring''' _lowerCamelCase : Any = SkipList() skip_list.insert("Key1" , 12 ) skip_list.insert("V" , 13 ) skip_list.insert("X" , 14 ) skip_list.insert("Key2" , 15 ) skip_list.delete("V" ) skip_list.delete("Key2" ) assert skip_list.find("V" ) is None assert skip_list.find("Key2" ) is None def lowerCamelCase_( ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Any = SkipList() skip_list.insert("Key1" , 12 ) skip_list.insert("V" , 13 ) skip_list.insert("X" , 14 ) skip_list.insert("Key2" , 15 ) skip_list.delete("V" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) == 14 assert skip_list.find("Key1" ) == 12 assert skip_list.find("Key2" ) == 15 skip_list.delete("X" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) == 12 assert skip_list.find("Key2" ) == 15 skip_list.delete("Key1" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) is None assert skip_list.find("Key2" ) == 15 skip_list.delete("Key2" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) is None assert skip_list.find("Key2" ) is None def lowerCamelCase_( ) -> Tuple: '''simple docstring''' _lowerCamelCase : Dict = SkipList() skip_list.insert("Key1" , 12 ) skip_list.insert("V" , 13 ) skip_list.insert("X" , 142 ) skip_list.insert("Key2" , 15 ) skip_list.delete("X" ) def traverse_keys(_lowerCamelCase ): yield node.key for forward_node in node.forward: yield from traverse_keys(_lowerCamelCase ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def lowerCamelCase_( ) -> Any: '''simple docstring''' def is_sorted(_lowerCamelCase ): return all(next_item >= item for item, next_item in zip(_lowerCamelCase , lst[1:] ) ) _lowerCamelCase : Tuple = SkipList() for i in range(10 ): skip_list.insert(_lowerCamelCase , _lowerCamelCase ) assert is_sorted(list(_lowerCamelCase ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(_lowerCamelCase ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(_lowerCamelCase ) ) def lowerCamelCase_( ) -> List[Any]: '''simple docstring''' for _ in range(100 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def lowerCamelCase_( ) -> Tuple: '''simple docstring''' _lowerCamelCase : Tuple = SkipList() skip_list.insert(2 , "2" ) skip_list.insert(4 , "4" ) skip_list.insert(6 , "4" ) skip_list.insert(4 , "5" ) skip_list.insert(8 , "4" ) skip_list.insert(9 , "4" ) skip_list.delete(4 ) print(_lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
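A compact usage sketch of the structure above: inserting an existing key updates its value, and iteration yields keys in sorted order.

skip_list = SkipList[str, int]()
skip_list.insert("beta", 2)
skip_list.insert("alpha", 1)
skip_list.insert("alpha", 10)   # update, not a duplicate entry
print(skip_list.find("alpha"))  # 10
skip_list.delete("beta")
print(list(skip_list))          # ['alpha'] -- keys come back sorted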
340
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = ( first_str_length if first_str_length > second_str_length else second_str_length ) _lowerCamelCase : list = [] for char_count in range(_lowerCamelCase ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(_lowerCamelCase ) if __name__ == "__main__": print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
340
1
"""simple docstring""" from collections.abc import Sequence def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = False ) -> float: '''simple docstring''' if not arr: return 0 _lowerCamelCase : str = 0 if allow_empty_subarrays else float("-inf" ) _lowerCamelCase : List[Any] = 0.0 for num in arr: _lowerCamelCase : Union[str, Any] = max(0 if allow_empty_subarrays else num , curr_sum + num ) _lowerCamelCase : Optional[Any] = max(_lowerCamelCase , _lowerCamelCase ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() _lowerCAmelCase : List[Any] = [-2, 1, -3, 4, -1, 2, 1, -5, 4] print(f'''{max_subarray_sum(nums) = }''')
340
"""simple docstring""" _lowerCAmelCase : Tuple = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Any = [False] * len(_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = [s] _lowerCamelCase : str = True while queue: _lowerCamelCase : Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_lowerCamelCase ) _lowerCamelCase : Any = True _lowerCamelCase : Any = u return visited[t] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : List[str] = [-1] * (len(_lowerCamelCase )) _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : List[str] = [i[:] for i in graph] # Record original cut, copy. while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Any = float("Inf" ) _lowerCamelCase : Dict = sink while s != source: # Find the minimum value in select path _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , graph[parent[s]][s] ) _lowerCamelCase : Union[str, Any] = parent[s] max_flow += path_flow _lowerCamelCase : Optional[Any] = sink while v != source: _lowerCamelCase : Union[str, Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _lowerCamelCase : List[str] = parent[v] for i in range(len(_lowerCamelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
340
1