Dataset schema (one row per code pair):

    code                     string   87 – 55.2k chars
    code_codestyle           int64    0 – 349
    style_context            string   135 – 49.1k chars
    style_context_codestyle  int64    0 – 349
    label                    int64    0 – 1
"""simple docstring""" import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput a = logging.get_logger(__name__) # pylint: disable=invalid-name def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ) -> Union[str, Any]: '''simple docstring''' warnings.warn( 'The preprocess method is deprecated and will be removed in a future version. Please' ' use VaeImageProcessor.preprocess instead' , _snake_case , ) if isinstance(_snake_case , torch.Tensor ): return image elif isinstance(_snake_case , PIL.Image.Image ): _A = [image] if isinstance(image[0] , PIL.Image.Image ): _A , _A = image[0].size _A , _A = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 _A = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image] _A = np.concatenate(_snake_case , axis=0 ) _A = np.array(_snake_case ).astype(np.floataa ) / 255.0 _A = image.transpose(0 , 3 , 1 , 2 ) _A = 2.0 * image - 1.0 _A = torch.from_numpy(_snake_case ) elif isinstance(image[0] , torch.Tensor ): _A = torch.cat(_snake_case , dim=0 ) return image def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ) -> Tuple: '''simple docstring''' if isinstance(_snake_case , torch.Tensor ): return mask elif isinstance(_snake_case , PIL.Image.Image ): _A = [mask] if isinstance(mask[0] , PIL.Image.Image ): _A , _A = mask[0].size _A , _A = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 _A = [np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask] _A = np.concatenate(_snake_case , axis=0 ) _A = mask.astype(np.floataa ) / 255.0 _A = 0 _A = 1 _A = torch.from_numpy(_snake_case ) elif isinstance(mask[0] , torch.Tensor ): _A = torch.cat(_snake_case , dim=0 ) return mask class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : UNetaDModel UpperCAmelCase : RePaintScheduler def __init__( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ): super().__init__() self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase ) @torch.no_grad() def __call__( self : List[Any] , _UpperCAmelCase : Union[torch.Tensor, PIL.Image.Image] , _UpperCAmelCase : Union[torch.Tensor, PIL.Image.Image] , _UpperCAmelCase : int = 250 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 10 , _UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , ): _A = image _A = _preprocess_image(_UpperCAmelCase ) _A = original_image.to(device=self.device , dtype=self.unet.dtype ) _A = _preprocess_mask(_UpperCAmelCase ) _A = mask_image.to(device=self.device , dtype=self.unet.dtype ) _A = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(_UpperCAmelCase )}, but requested an effective batch''' F''' size of {batch_size}. 
Make sure the batch size matches the length of the generators.''' ) _A = original_image.shape _A = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , self.device ) _A = eta _A = self.scheduler.timesteps[0] + 1 _A = generator[0] if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual _A = self.unet(_UpperCAmelCase , _UpperCAmelCase ).sample # compute previous image: x_t -> x_t-1 _A = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample else: # compute the reverse: x_t-1 -> x_t _A = self.scheduler.undo_step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _A = t _A = (image / 2 + 0.5).clamp(0 , 1 ) _A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _A = self.numpy_to_pil(_UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_UpperCAmelCase )
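# Added usage sketch for the pipeline above. The checkpoint id is the one used in the
# diffusers docs for RePaint; the local image paths are illustrative assumptions.
import PIL.Image
import torch
from diffusers import RePaintPipeline, RePaintScheduler

scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler).to("cuda")

original = PIL.Image.open("celeba_hq_256.png").resize((256, 256))  # hypothetical input image
mask = PIL.Image.open("mask_256.png").resize((256, 256))  # hypothetical keep-mask (white = keep)
generator = torch.Generator(device="cuda").manual_seed(0)
output = pipe(
    image=original,
    mask_image=mask,
    num_inference_steps=250,
    eta=0.0,
    jump_length=10,
    jump_n_sample=10,
    generator=generator,
)
output.images[0].save("inpainted.png")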
# [code_codestyle: 315]
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
# [style_context_codestyle: 315, label: 1]
"""simple docstring""" import functools def _snake_case ( _snake_case : str , _snake_case : str ) -> int: '''simple docstring''' _A = len(_snake_case ) _A = len(_snake_case ) @functools.cache def min_distance(_snake_case : int , _snake_case : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa _A = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , _snake_case ) , 1 + min_distance(_snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
# [code_codestyle: 315]
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : tuple[int, int] , _snake_case : int ) -> list[tuple[int, int]]: '''simple docstring''' _A , _A = position _A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] _A = [] for position in positions: _A , _A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(_snake_case ) return permissible_positions def _snake_case ( _snake_case : list[list[int]] ) -> bool: '''simple docstring''' return not any(elem == 0 for row in board for elem in row ) def _snake_case ( _snake_case : list[list[int]] , _snake_case : tuple[int, int] , _snake_case : int ) -> bool: '''simple docstring''' if is_complete(_snake_case ): return True for position in get_valid_pos(_snake_case , len(_snake_case ) ): _A , _A = position if board[y][x] == 0: _A = curr + 1 if open_knight_tour_helper(_snake_case , _snake_case , curr + 1 ): return True _A = 0 return False def _snake_case ( _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [[0 for i in range(_snake_case )] for j in range(_snake_case )] for i in range(_snake_case ): for j in range(_snake_case ): _A = 1 if open_knight_tour_helper(_snake_case , (i, j) , 1 ): return board _A = 0 _A = F'''Open Kight Tour cannot be performed on a board of size {n}''' raise ValueError(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
# [style_context_codestyle: 315, label: 1]
"""simple docstring""" from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput a = 8 def _snake_case ( _snake_case : int , _snake_case : Union[str, Any]=BITS ) -> Dict: '''simple docstring''' _A = x.device _A = (x * 2_55).int().clamp(0 , 2_55 ) _A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_snake_case ) _A = rearrange(_snake_case , 'd -> d 1 1' ) _A = rearrange(_snake_case , 'b c h w -> b c 1 h w' ) _A = ((x & mask) != 0).float() _A = rearrange(_snake_case , 'b c d h w -> b (c d) h w' ) _A = bits * 2 - 1 return bits def _snake_case ( _snake_case : int , _snake_case : Optional[int]=BITS ) -> Any: '''simple docstring''' _A = x.device _A = (x > 0).int() _A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_snake_case , dtype=torch.intaa ) _A = rearrange(_snake_case , 'd -> d 1 1' ) _A = rearrange(_snake_case , 'b (c d) h w -> b c d h w' , d=8 ) _A = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' ) return (dec / 2_55).clamp(0.0 , 1.0 ) def _snake_case ( self : List[Any] , _snake_case : torch.FloatTensor , _snake_case : int , _snake_case : torch.FloatTensor , _snake_case : float = 0.0 , _snake_case : bool = True , _snake_case : int=None , _snake_case : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]: '''simple docstring''' if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) _A = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas _A = self.alphas_cumprod[timestep] _A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod _A = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" _A = self.bit_scale if self.config.clip_sample: _A = torch.clamp(_snake_case , -scale , _snake_case ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) _A = self._get_variance(_snake_case , _snake_case ) _A = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide _A = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _A = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _A = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 _A = model_output.device if torch.is_tensor(_snake_case ) else 'cpu' _A = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_snake_case ).to(_snake_case ) _A = self._get_variance(_snake_case , _snake_case ) ** 0.5 * eta * noise _A = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=_snake_case , pred_original_sample=_snake_case ) def _snake_case ( self : Any , _snake_case : torch.FloatTensor , _snake_case : int , _snake_case : torch.FloatTensor , _snake_case : int="epsilon" , _snake_case : Union[str, Any]=None , _snake_case : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]: '''simple docstring''' _A = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: _A , _A = torch.split(_snake_case , sample.shape[1] , dim=1 ) else: _A = None # 1. compute alphas, betas _A = self.alphas_cumprod[t] _A = self.alphas_cumprod[t - 1] if t > 0 else self.one _A = 1 - alpha_prod_t _A = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": _A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": _A = model_output else: raise ValueError(F'''Unsupported prediction_type {prediction_type}.''' ) # 3. Clip "predicted x_0" _A = self.bit_scale if self.config.clip_sample: _A = torch.clamp(_snake_case , -scale , _snake_case ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _A = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t _A = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _A = 0 if t > 0: _A = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_snake_case ).to(model_output.device ) _A = (self._get_variance(_snake_case , predicted_variance=_snake_case ) ** 0.5) * noise _A = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=_snake_case , pred_original_sample=_snake_case ) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Tuple , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] , _UpperCAmelCase : Optional[float] = 1.0 , ): super().__init__() _A = bit_scale _A = ( ddim_bit_scheduler_step if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else ddpm_bit_scheduler_step ) self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase ) @torch.no_grad() def __call__( self : Optional[int] , _UpperCAmelCase : Optional[int] = 256 , _UpperCAmelCase : Optional[int] = 256 , _UpperCAmelCase : Optional[int] = 50 , _UpperCAmelCase : Optional[torch.Generator] = None , _UpperCAmelCase : Optional[int] = 1 , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , **_UpperCAmelCase : Optional[Any] , ): _A = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=_UpperCAmelCase , ) _A = decimal_to_bits(_UpperCAmelCase ) * self.bit_scale _A = latents.to(self.device ) self.scheduler.set_timesteps(_UpperCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual _A = self.unet(_UpperCAmelCase , _UpperCAmelCase ).sample # compute the previous noisy sample x_t -> x_t-1 _A = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample _A = bits_to_decimal(_UpperCAmelCase ) if output_type == "pil": _A = self.numpy_to_pil(_UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_UpperCAmelCase )
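# Added sanity check for the bit codecs above (shapes are illustrative): encoding
# quantizes to 1/255 steps, so a round trip reproduces the input up to that resolution.
x = torch.rand(1, 3, 8, 8)
bit_repr = decimal_to_bits(x)          # (1, 24, 8, 8), values in {-1.0, 1.0}
recovered = bits_to_decimal(bit_repr)  # (1, 3, 8, 8), values in [0, 1]
assert torch.allclose(x, recovered, atol=1 / 255)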
# [code_codestyle: 315]
"""simple docstring""" import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : int , *_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : List[Any] ): super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) _A = eval_examples _A = post_process_function def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str = "eval" ): _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(_UpperCAmelCase ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) else: _A = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_UpperCAmelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCAmelCase ) return metrics def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str = "test" ): _A = self.get_test_dataloader(_UpperCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. 
_A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions , 'predict' ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCAmelCase )
# [style_context_codestyle: 315, label: 1]
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger a = get_logger(__name__) a = Path(__file__).parent / '''model_card_template.md''' a = uuida().hex a = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES a = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES a = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/''' def _snake_case ( _snake_case : Union[Dict, str, None] = None ) -> str: '''simple docstring''' _A = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}''' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += F'''; torch/{_torch_version}''' if is_flax_available(): ua += F'''; jax/{_jax_version}''' ua += F'''; flax/{_flax_version}''' if is_onnx_available(): ua += F'''; onnxruntime/{_onnxruntime_version}''' # CI will set this value to True if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(_snake_case , _snake_case ): ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items() ) elif isinstance(_snake_case , _snake_case ): ua += "; " + user_agent return ua def _snake_case ( _snake_case : str , _snake_case : Optional[str] = None , _snake_case : Optional[str] = None ) -> str: '''simple docstring''' if token is None: _A = HfFolder.get_token() if organization is None: _A = whoami(_snake_case )['name'] return F'''{username}/{model_id}''' else: return F'''{organization}/{model_id}''' def _snake_case ( _snake_case : Any , _snake_case : Optional[Any] ) -> int: '''simple docstring''' if not is_jinja_available(): raise ValueError( 'Modelcard rendering is based on Jinja templates.' ' Please make sure to have `jinja` installed before using `create_model_card`.' ' To install it, please run `pip install Jinja2`.' 
) if hasattr(_snake_case , 'local_rank' ) and args.local_rank not in [-1, 0]: return _A = args.hub_token if hasattr(_snake_case , 'hub_token' ) else None _A = get_full_repo_name(_snake_case , token=_snake_case ) _A = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=_snake_case , model_name=_snake_case , repo_name=_snake_case , dataset_name=args.dataset_name if hasattr(_snake_case , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(_snake_case , 'gradient_accumulation_steps' ) else None ) , adam_betaa=args.adam_betaa if hasattr(_snake_case , 'adam_beta1' ) else None , adam_betaa=args.adam_betaa if hasattr(_snake_case , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(_snake_case , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(_snake_case , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(_snake_case , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(_snake_case , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(_snake_case , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(_snake_case , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(_snake_case , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , ) _A = os.path.join(args.output_dir , 'README.md' ) model_card.save(_snake_case ) def _snake_case ( _snake_case : Optional[str] , _snake_case : Optional[str] = None ) -> List[Any]: '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash _A = str(Path(_snake_case ).as_posix() ) _A = re.search(R'snapshots/([^/]+)/' , _snake_case ) if search is None: return None _A = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(_snake_case ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. a = os.path.expanduser( os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface''')) ) a = os.path.join(hf_cache_home, '''diffusers''') def _snake_case ( _snake_case : Optional[str] = None , _snake_case : Optional[str] = None ) -> None: '''simple docstring''' if new_cache_dir is None: _A = DIFFUSERS_CACHE if old_cache_dir is None: _A = old_diffusers_cache _A = Path(_snake_case ).expanduser() _A = Path(_snake_case ).expanduser() for old_blob_path in old_cache_dir.glob('**/blobs/*' ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): _A = new_cache_dir / old_blob_path.relative_to(_snake_case ) new_blob_path.parent.mkdir(parents=_snake_case , exist_ok=_snake_case ) os.replace(_snake_case , _snake_case ) try: os.symlink(_snake_case , _snake_case ) except OSError: logger.warning( 'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' 
) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). a = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''') if not os.path.isfile(cache_version_file): a = 0 else: with open(cache_version_file) as f: try: a = int(f.read()) except ValueError: a = 0 if cache_version < 1: a = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( '''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ''' '''existing cached models. This is a one-time operation, you can interrupt it or run it ''' '''later by calling `diffusers.utils.hub_utils.move_cache()`.''' ) try: move_cache() except Exception as e: a = '''\n'''.join(traceback.format_tb(e.__traceback__)) logger.error( F'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease ''' '''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ''' '''message and we will do our best to help.''' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, '''w''') as f: f.write('''1''') except Exception: logger.warning( F'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure ''' '''the directory exists and can be written to.''' ) def _snake_case ( _snake_case : str , _snake_case : Optional[str] = None ) -> str: '''simple docstring''' if variant is not None: _A = weights_name.split('.' ) _A = splits[:-1] + [variant] + splits[-1:] _A = '.'.join(_snake_case ) return weights_name def _snake_case ( _snake_case : int , *, _snake_case : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : str , _snake_case : Optional[Any] , _snake_case : Tuple , _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : int , _snake_case : Union[str, Any]=None , ) -> str: '''simple docstring''' _A = str(_snake_case ) if os.path.isfile(_snake_case ): return pretrained_model_name_or_path elif os.path.isdir(_snake_case ): if os.path.isfile(os.path.join(_snake_case , _snake_case ) ): # Load from a PyTorch checkpoint _A = os.path.join(_snake_case , _snake_case ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(_snake_case , _snake_case , _snake_case ) ): _A = os.path.join(_snake_case , _snake_case , _snake_case ) return model_file else: raise EnvironmentError( F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(_snake_case ).base_version ) >= version.parse('0.20.0' ) ): try: _A = hf_hub_download( _snake_case , filename=_add_variant(_snake_case , _snake_case ) , cache_dir=_snake_case , force_download=_snake_case , proxies=_snake_case , resume_download=_snake_case , local_files_only=_snake_case , use_auth_token=_snake_case , user_agent=_snake_case , subfolder=_snake_case , revision=revision or commit_hash , ) warnings.warn( F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. 
Please use `variant=\'{revision}\'` instead.''' , _snake_case , ) return model_file except: # noqa: E722 warnings.warn( F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(_snake_case , _snake_case )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(_snake_case , _snake_case )}\' so that the correct variant file can be added.''' , _snake_case , ) try: # 2. Load model file as usual _A = hf_hub_download( _snake_case , filename=_snake_case , cache_dir=_snake_case , force_download=_snake_case , proxies=_snake_case , resume_download=_snake_case , local_files_only=_snake_case , use_auth_token=_snake_case , user_agent=_snake_case , subfolder=_snake_case , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ''' 'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a ' 'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli ' 'login`.' ) except RevisionNotFoundError: raise EnvironmentError( F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ''' 'this model name. Check the model page at ' F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' ) except EntryNotFoundError: raise EnvironmentError( F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' ) except HTTPError as err: raise EnvironmentError( F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' ) except ValueError: raise EnvironmentError( F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it''' F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a''' F''' directory containing a file named {weights_name} or''' ' \nCheckout your internet connection or see how to run the library in' ' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' ) except EnvironmentError: raise EnvironmentError( F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ''' '\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. ' F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ''' F'''containing a file named {weights_name}''' )
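# Added quick check of the variant naming helper above: the variant tag is inserted
# just before the file extension, and a missing variant leaves the name unchanged.
assert _add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert _add_variant("diffusion_pytorch_model.bin") == "diffusion_pytorch_model.bin"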
# [style_context_codestyle: 315, label: 1]
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ) -> bool: '''simple docstring''' return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
# [code_codestyle: 315]
"""simple docstring""" import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any]=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : str=32 , _UpperCAmelCase : List[Any]=5 , _UpperCAmelCase : Any=4 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : List[str]=512 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Optional[int]="None" , _UpperCAmelCase : Any=3 , _UpperCAmelCase : str=4 , _UpperCAmelCase : List[Any]=None , ): _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = relative_attention _A = position_biased_input _A = pos_att_type _A = scope def lowerCAmelCase_ ( self : Dict ): _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : List[str] ): return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = self.get_config() _A = 300 return config def 
lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Optional[int] ): self.parent.assertListEqual(list(result.loss.size() ) , [] ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] ): _A = DebertaModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )[0] _A = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )[0] _A = model(_UpperCAmelCase )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] ): _A = DebertaForMaskedLM(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int ): _A = self.num_labels _A = DebertaForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : Any ): _A = self.num_labels _A = DebertaForTokenClassification(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Any ): _A = DebertaForQuestionAnswering(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : Dict ): _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , 
unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Any = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) UpperCAmelCase : Union[str, Any] = ( { '''feature-extraction''': DebertaModel, '''fill-mask''': DebertaForMaskedLM, '''question-answering''': DebertaForQuestionAnswering, '''text-classification''': DebertaForSequenceClassification, '''token-classification''': DebertaForTokenClassification, '''zero-shot''': DebertaForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase : Optional[int] = True UpperCAmelCase : Union[str, Any] = False UpperCAmelCase : Union[str, Any] = False UpperCAmelCase : str = False UpperCAmelCase : Optional[Any] = False def lowerCAmelCase_ ( self : str ): _A = DebertaModelTester(self ) _A = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : List[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : str ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : str ): for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = DebertaModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @require_torch @require_sentencepiece @require_tokenizers class lowercase_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason='Model not available yet' ) def lowerCAmelCase_ ( self : List[Any] ): pass @slow def lowerCAmelCase_ ( self : Union[str, Any] ): _A = DebertaModel.from_pretrained('microsoft/deberta-base' ) _A = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) _A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0] # compare the actual values for a slice. _A = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
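# Added standalone sanity check mirroring the integration test above. It downloads the
# checkpoint, so run it only when network access is acceptable; the shorter input ids
# are an illustrative assumption.
if __name__ == "__main__":
    model = DebertaModel.from_pretrained("microsoft/deberta-base")
    input_ids = torch.tensor([[0, 31_414, 232, 2]])
    with torch.no_grad():
        print(model(input_ids)[0].shape)  # torch.Size([1, 4, 768]) for deberta-base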
# [style_context_codestyle: 315, label: 1]
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) _A = Vector() def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(_UpperCAmelCase ) , '(0,0,0,0,0,1)' ) def lowerCAmelCase_ ( self : Optional[int] ): _A = Vector([1, 2, 3, 4] ) self.assertEqual(len(_UpperCAmelCase ) , 4 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2] ) _A = Vector([1, 2, 3, 4, 5] ) _A = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) _A = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def lowerCAmelCase_ ( self : str ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) _A = Vector([2, -1, 4] ) # for test of dot product _A = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' ) self.assertEqual((a * b) , 0 ) def lowerCAmelCase_ ( self : Dict ): self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 ) def lowerCAmelCase_ ( self : Tuple ): self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , _UpperCAmelCase , _UpperCAmelCase ) ) , '(3,4,7)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 0, 0, 0, 0, 0] ) _A = x.copy() self.assertEqual(str(_UpperCAmelCase ) , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(_UpperCAmelCase ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : str ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) _A = Vector([1, 2, 3] ) self.assertEqual('(14,32,50)' , str(a * x ) ) self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) 
a.change_component(0 , 2 , 5 ) self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : List[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) ) def lowerCAmelCase_ ( self : int ): self.assertEqual( '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
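# Added quick demo of the library under test, assuming the same relative package
# layout as the imports above; the expected outputs match the assertions in the tests.
v = Vector([1, 2, 3])
m = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
print(m * v)  # (14,32,50)
print(Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3).determinant())  # -5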
# [style_context_codestyle: 315, label: 1]
"""simple docstring""" import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets a = '''\ @inproceedings{lin-2004-rouge, title = "{ROUGE}: A Package for Automatic Evaluation of Summaries", author = "Lin, Chin-Yew", booktitle = "Text Summarization Branches Out", month = jul, year = "2004", address = "Barcelona, Spain", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W04-1013", pages = "74--81", } ''' a = '''\ ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for evaluating automatic summarization and machine translation software in natural language processing. The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation. Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters. This metrics is a wrapper around Google Research reimplementation of ROUGE: https://github.com/google-research/google-research/tree/master/rouge ''' a = ''' Calculates average rouge scores for a list of hypotheses and references Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. rouge_types: A list of rouge types to calculate. Valid names: `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring, `"rougeL"`: Longest common subsequence based scoring. `"rougeLSum"`: rougeLsum splits text using `"\n"`. See details in https://github.com/huggingface/datasets/issues/617 use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes. 
use_aggregator: Return aggregates if this is set to True Returns: rouge1: rouge_1 (precision, recall, f1), rouge2: rouge_2 (precision, recall, f1), rougeL: rouge_l (precision, recall, f1), rougeLsum: rouge_lsum (precision, recall, f1) Examples: >>> rouge = datasets.load_metric(\'rouge\') >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> results = rouge.compute(predictions=predictions, references=references) >>> print(list(results.keys())) [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\'] >>> print(results["rouge1"]) AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0)) >>> print(results["rouge1"].mid.fmeasure) 1.0 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[ 'https://en.wikipedia.org/wiki/ROUGE_(metric)', 'https://github.com/google-research/google-research/tree/master/rouge', ] , ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict=None , _UpperCAmelCase : str=True , _UpperCAmelCase : Tuple=False ): if rouge_types is None: _A = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] _A = rouge_scorer.RougeScorer(rouge_types=_UpperCAmelCase , use_stemmer=_UpperCAmelCase ) if use_aggregator: _A = scoring.BootstrapAggregator() else: _A = [] for ref, pred in zip(_UpperCAmelCase , _UpperCAmelCase ): _A = scorer.score(_UpperCAmelCase , _UpperCAmelCase ) if use_aggregator: aggregator.add_scores(_UpperCAmelCase ) else: scores.append(_UpperCAmelCase ) if use_aggregator: _A = aggregator.aggregate() else: _A = {} for key in scores[0]: _A = [score[key] for score in scores] return result
# [style_context_codestyle: 315]
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''', } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : int = '''xlnet''' UpperCAmelCase : List[Any] = ['''mems'''] UpperCAmelCase : Any = { '''n_token''': '''vocab_size''', # Backward compatibility '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , _UpperCAmelCase : Dict=32_000 , _UpperCAmelCase : List[str]=1_024 , _UpperCAmelCase : Any=24 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : Union[str, Any]=4_096 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Any=True , _UpperCAmelCase : str="bi" , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1E-1_2 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=512 , _UpperCAmelCase : Dict=None , _UpperCAmelCase : int=True , _UpperCAmelCase : int=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : int=-1 , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Union[str, Any]="last" , _UpperCAmelCase : int=True , _UpperCAmelCase : str="tanh" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Optional[Any]=5 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Dict=2 , **_UpperCAmelCase : int , ): _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( 'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`' ' instead.' , _UpperCAmelCase , ) _A = kwargs['use_cache'] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) @property def lowerCAmelCase_ ( self : Tuple ): logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] ): # Message copied from Transformer-XL documentation raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
# [style_context_codestyle: 315, label: 1]
"""simple docstring""" a = 256 # Modulus to hash a string a = 1_000_003 def _snake_case ( _snake_case : str , _snake_case : str ) -> bool: '''simple docstring''' _A = len(_snake_case ) _A = len(_snake_case ) if p_len > t_len: return False _A = 0 _A = 0 _A = 1 # Calculating the hash of pattern and substring of text for i in range(_snake_case ): _A = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus _A = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue _A = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash _A = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def _snake_case ( ) -> None: '''simple docstring''' _A = 'abc1abc12' _A = 'alskfjaldsabc1abc1abc12k23adsfabcabc' _A = 'alskfjaldsk23adsfabcabc' assert rabin_karp(_snake_case , _snake_case ) and not rabin_karp(_snake_case , _snake_case ) # Test 2) _A = 'ABABX' _A = 'ABABZABABYABABX' assert rabin_karp(_snake_case , _snake_case ) # Test 3) _A = 'AAAB' _A = 'ABAAAAAB' assert rabin_karp(_snake_case , _snake_case ) # Test 4) _A = 'abcdabcy' _A = 'abcxabcdabxabcdabcdabcy' assert rabin_karp(_snake_case , _snake_case ) # Test 5) _A = 'Lü' _A = 'Lüsai' assert rabin_karp(_snake_case , _snake_case ) _A = 'Lue' assert not rabin_karp(_snake_case , _snake_case ) print('Success.' ) if __name__ == "__main__": test_rabin_karp()
# [code_codestyle: 315]
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed a = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def _snake_case ( _snake_case : Tuple ) -> Dict: '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def _snake_case ( _snake_case : str , _snake_case : List[Any] ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False elif args.student_type == "gpt2": _A = False def _snake_case ( _snake_case : str , _snake_case : int ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False def _snake_case ( ) -> Tuple: '''simple docstring''' _A = argparse.ArgumentParser(description='Training' ) parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' ) parser.add_argument( '--dump_path' , type=_snake_case , required=_snake_case , help='The output directory (log, checkpoints, parameters, etc.)' ) parser.add_argument( '--data_file' , type=_snake_case , required=_snake_case , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , ) parser.add_argument( '--student_type' , type=_snake_case , choices=['distilbert', 'roberta', 'gpt2'] , required=_snake_case , help='The student type (DistilBERT, RoBERTa).' , ) parser.add_argument('--student_config' , type=_snake_case , required=_snake_case , help='Path to the student configuration.' ) parser.add_argument( '--student_pretrained_weights' , default=_snake_case , type=_snake_case , help='Load student initialization checkpoint.' ) parser.add_argument( '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=_snake_case , help='Teacher type (BERT, RoBERTa).' ) parser.add_argument('--teacher_name' , type=_snake_case , required=_snake_case , help='The teacher model.' ) parser.add_argument('--temperature' , default=2.0 , type=_snake_case , help='Temperature for the softmax temperature.' 
) parser.add_argument( '--alpha_ce' , default=0.5 , type=_snake_case , help='Linear weight for the distillation loss. Must be >=0.' ) parser.add_argument( '--alpha_mlm' , default=0.0 , type=_snake_case , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , ) parser.add_argument('--alpha_clm' , default=0.5 , type=_snake_case , help='Linear weight for the CLM loss. Must be >=0.' ) parser.add_argument('--alpha_mse' , default=0.0 , type=_snake_case , help='Linear weight of the MSE loss. Must be >=0.' ) parser.add_argument( '--alpha_cos' , default=0.0 , type=_snake_case , help='Linear weight of the cosine embedding loss. Must be >=0.' ) parser.add_argument( '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' ) parser.add_argument( '--mlm_mask_prop' , default=0.15 , type=_snake_case , help='Proportion of tokens for which we need to make a prediction.' , ) parser.add_argument('--word_mask' , default=0.8 , type=_snake_case , help='Proportion of tokens to mask out.' ) parser.add_argument('--word_keep' , default=0.1 , type=_snake_case , help='Proportion of tokens to keep.' ) parser.add_argument('--word_rand' , default=0.1 , type=_snake_case , help='Proportion of tokens to randomly replace.' ) parser.add_argument( '--mlm_smoothing' , default=0.7 , type=_snake_case , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , ) parser.add_argument('--token_counts' , type=_snake_case , help='The token counts in the data_file for MLM.' ) parser.add_argument( '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , ) parser.add_argument( '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , ) parser.add_argument( '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , ) parser.add_argument('--n_epoch' , type=_snake_case , default=3 , help='Number of pass on the whole dataset.' ) parser.add_argument('--batch_size' , type=_snake_case , default=5 , help='Batch size (for each process).' ) parser.add_argument( '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , ) parser.add_argument( '--gradient_accumulation_steps' , type=_snake_case , default=50 , help='Gradient accumulation for larger training batches.' , ) parser.add_argument('--warmup_prop' , default=0.05 , type=_snake_case , help='Linear warmup proportion.' ) parser.add_argument('--weight_decay' , default=0.0 , type=_snake_case , help='Weight decay if we apply some.' ) parser.add_argument('--learning_rate' , default=5E-4 , type=_snake_case , help='The initial learning rate for Adam.' ) parser.add_argument('--adam_epsilon' , default=1E-6 , type=_snake_case , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , default=5.0 , type=_snake_case , help='Max gradient norm.' ) parser.add_argument('--initializer_range' , default=0.02 , type=_snake_case , help='Random initialization range.' 
) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=_snake_case , default='O1' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_gpu' , type=_snake_case , default=1 , help='Number of GPUs in the node.' ) parser.add_argument('--local_rank' , type=_snake_case , default=-1 , help='Distributed training - Local rank' ) parser.add_argument('--seed' , type=_snake_case , default=56 , help='Random seed' ) parser.add_argument('--log_interval' , type=_snake_case , default=5_00 , help='Tensorboard logging interval.' ) parser.add_argument('--checkpoint_interval' , type=_snake_case , default=40_00 , help='Checkpoint interval.' ) _A = parser.parse_args() sanity_checks(_snake_case ) # ARGS # init_gpu_params(_snake_case ) set_seed(_snake_case ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' ' itUse `--force` if you want to overwrite it' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f: json.dump(vars(_snake_case ) , _snake_case , indent=4 ) git_log(args.dump_path ) _A , _A , _A = MODEL_CLASSES[args.student_type] _A , _A , _A = MODEL_CLASSES[args.teacher_type] # TOKENIZER # _A = teacher_tokenizer_class.from_pretrained(args.teacher_name ) _A = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): _A = tokenizer.all_special_tokens.index(_snake_case ) _A = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) _A = special_tok_ids _A = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , 'rb' ) as fp: _A = pickle.load(_snake_case ) if args.mlm: logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , 'rb' ) as fp: _A = pickle.load(_snake_case ) _A = np.maximum(_snake_case , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): _A = 0.0 # do not predict special tokens _A = torch.from_numpy(_snake_case ) else: _A = None _A = LmSeqsDataset(params=_snake_case , data=_snake_case ) logger.info('Data loader created.' ) # STUDENT # logger.info(F'''Loading student config from {args.student_config}''' ) _A = student_config_class.from_pretrained(args.student_config ) _A = True if args.student_pretrained_weights is not None: logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' ) _A = student_model_class.from_pretrained(args.student_pretrained_weights , config=_snake_case ) else: _A = student_model_class(_snake_case ) if args.n_gpu > 0: student.to(F'''cuda:{args.local_rank}''' ) logger.info('Student loaded.' 
) # TEACHER # _A = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_snake_case ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_snake_case , _snake_case ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_snake_case , _snake_case ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() _A = Distiller( params=_snake_case , dataset=_snake_case , token_probs=_snake_case , student=_snake_case , teacher=_snake_case ) distiller.train() logger.info('Let\'s go get some drinks.' ) if __name__ == "__main__": main()
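
# A hedged sketch (not part of the training script above) of how the
# MODEL_CLASSES registry is consumed: one (config, model, tokenizer) lookup per
# side of the teacher/student pair. The 'distilbert-base-uncased' checkpoint is
# an assumed example, not a value taken from the original script.
from transformers import DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer

MODEL_CLASSES = {
    'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
}

config_class, model_class, tokenizer_class = MODEL_CLASSES['distilbert']
tokenizer = tokenizer_class.from_pretrained('distilbert-base-uncased')  # assumed checkpoint
config = config_class.from_pretrained('distilbert-base-uncased')
student = model_class(config)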
"""simple docstring""" import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets a = datasets.logging.get_logger(__name__) a = '''\ @inproceedings{bleurt, title={BLEURT: Learning Robust Metrics for Text Generation}, author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh}, booktitle={ACL}, year={2020}, url={https://arxiv.org/abs/2004.04696} } ''' a = '''\ BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better). See the project\'s README at https://github.com/google-research/bleurt#readme for more information. ''' a = ''' BLEURT score. Args: `predictions` (list of str): prediction/candidate sentences `references` (list of str): reference sentences `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None. Returns: \'scores\': List of scores. Examples: >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> bleurt = datasets.load_metric("bleurt") >>> results = bleurt.compute(predictions=predictions, references=references) >>> print([round(v, 2) for v in results["scores"]]) [1.03, 1.04] ''' a = { '''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''', '''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''', '''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''', '''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''', '''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''', '''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''', '''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''', '''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''', '''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''', '''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''', } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self : Dict ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/google-research/bleurt' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/google-research/bleurt'] , reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'] , ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Union[str, Any] ): # check that config name specifies a valid BLEURT model if self.config_name == "default": logger.warning( 'Using default BLEURT-Base checkpoint for sequence maximum length 128. ' 'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').' 
) _A = 'bleurt-base-128' if self.config_name.lower() in CHECKPOINT_URLS: _A = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: _A = self.config_name.upper() else: raise KeyError( F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' ) # download the model checkpoint specified by self.config_name and set up the scorer _A = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) _A = score.BleurtScorer(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str ): _A = self.scorer.score(references=_UpperCAmelCase , candidates=_UpperCAmelCase ) return {"scores": scores}
"""simple docstring""" from manim import * class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Dict ): _A = Rectangle(height=0.5 , width=0.5 ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _A = Rectangle(height=0.25 , width=0.25 ) _A = [mem.copy() for i in range(6 )] _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('CPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(4 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('GPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Model' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(_UpperCAmelCase ) _A = [] _A = [] for i, rect in enumerate(_UpperCAmelCase ): _A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 ) target.move_to(_UpperCAmelCase ) model_arr.append(_UpperCAmelCase ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(_UpperCAmelCase ) self.add(*_UpperCAmelCase , *_UpperCAmelCase ) _A = [meta_mem.copy() for i in range(6 )] _A = [meta_mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Disk' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) disk.move_to([-4, -1.25, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _A = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(_UpperCAmelCase ) _A = MarkupText( F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase ) ) _A = Square(0.3 ) input.set_fill(_UpperCAmelCase , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 ) self.play(Write(_UpperCAmelCase ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 ) self.play(MoveToTarget(_UpperCAmelCase ) ) self.play(FadeOut(_UpperCAmelCase ) ) _A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 ) a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 ) model_cpu_arr[0].generate_target() 
model_cpu_arr[0].target.move_to(gpu_rect[0] ) _A = MarkupText( F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) ) _A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) _A = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) _A = AnimationGroup( FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 ) self.play(_UpperCAmelCase ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: _A = 0.7 self.play( Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) _A = a_c _A = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , ) _A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) ) self.wait()
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Dict = ( '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.''' '''It takes two arguments named `image` which should be the original image, and `label` which should be a text ''' '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.''' ) UpperCAmelCase : Any = '''CIDAS/clipseg-rd64-refined''' UpperCAmelCase : Union[str, Any] = '''image_segmenter''' UpperCAmelCase : List[Any] = CLIPSegForImageSegmentation UpperCAmelCase : str = ['''image''', '''text'''] UpperCAmelCase : Optional[int] = ['''image'''] def __init__( self : List[Any] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : Tuple ): requires_backends(self , ['vision'] ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : "Image" , _UpperCAmelCase : str ): return self.pre_processor(text=[label] , images=[image] , padding=_UpperCAmelCase , return_tensors='pt' ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Union[str, Any] ): with torch.no_grad(): _A = self.model(**_UpperCAmelCase ).logits return logits def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Tuple ): _A = outputs.cpu().detach().numpy() _A = 0 _A = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
"""simple docstring""" def _snake_case ( _snake_case : list , _snake_case : int = 0 ) -> list: '''simple docstring''' _A = length or len(_snake_case ) _A = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: _A , _A = list_data[i + 1], list_data[i] _A = True return list_data if not swapped else bubble_sort(_snake_case , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva a = '''''' a = '''''' a = '''''' a = 1 # (0 is vertical, 1 is horizontal) def _snake_case ( ) -> None: '''simple docstring''' _A , _A = get_dataset(_snake_case , _snake_case ) print('Processing...' ) _A , _A , _A = update_image_and_anno(_snake_case , _snake_case , _snake_case ) for index, image in enumerate(_snake_case ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' _A = random_chars(32 ) _A = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0] _A = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(F'''/{file_root}.jpg''' , _snake_case , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F'''Success {index+1}/{len(_snake_case )} with {file_name}''' ) _A = [] for anno in new_annos[index]: _A = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(_snake_case ) with open(F'''/{file_root}.txt''' , 'w' ) as outfile: outfile.write('\n'.join(line for line in annos_list ) ) def _snake_case ( _snake_case : str , _snake_case : str ) -> tuple[list, list]: '''simple docstring''' _A = [] _A = [] for label_file in glob.glob(os.path.join(_snake_case , '*.txt' ) ): _A = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0] with open(_snake_case ) as in_file: _A = in_file.readlines() _A = os.path.join(_snake_case , F'''{label_name}.jpg''' ) _A = [] for obj_list in obj_lists: _A = obj_list.rstrip('\n' ).split(' ' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(_snake_case ) labels.append(_snake_case ) return img_paths, labels def _snake_case ( _snake_case : list , _snake_case : list , _snake_case : int = 1 ) -> tuple[list, list, list]: '''simple docstring''' _A = [] _A = [] _A = [] for idx in range(len(_snake_case ) ): _A = [] _A = img_list[idx] path_list.append(_snake_case ) _A = anno_list[idx] _A = cva.imread(_snake_case ) if flip_type == 1: _A = cva.flip(_snake_case , _snake_case ) for bbox in img_annos: _A = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: _A = cva.flip(_snake_case , _snake_case ) for bbox in img_annos: _A = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(_snake_case ) new_imgs_list.append(_snake_case ) return new_imgs_list, new_annos_lists, path_list def _snake_case ( _snake_case : int = 32 ) -> str: '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" _A = ascii_lowercase + digits return "".join(random.choice(_snake_case ) for _ in range(_snake_case ) ) if __name__ == "__main__": main() print('''DONE ✅''')
"""simple docstring""" import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Any = ['''input_values''', '''attention_mask'''] def __init__( self : Dict , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 16_000 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 80 , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : str = "hann_window" , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 7_600 , _UpperCAmelCase : float = 1E-1_0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , **_UpperCAmelCase : List[Any] , ): super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase ) _A = do_normalize _A = return_attention_mask _A = num_mel_bins _A = hop_length _A = win_length _A = win_function _A = frame_signal_scale _A = fmin _A = fmax _A = mel_floor _A = reduction_factor _A = win_length * sampling_rate // 1_000 _A = hop_length * sampling_rate // 1_000 _A = optimal_fft_length(self.sample_size ) _A = (self.n_fft // 2) + 1 _A = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase ) _A = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , ) if frame_signal_scale != 1.0: warnings.warn( 'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) if reduction_factor != 2.0: warnings.warn( 'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def lowerCAmelCase_ ( _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : float = 0.0 ): if attention_mask is not None: _A = np.array(_UpperCAmelCase , np.intaa ) _A = [] for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1 ) ): _A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: _A = padding_value normed_input_values.append(_UpperCAmelCase ) else: _A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , ): _A = spectrogram( _UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , ) return log_mel_spec.T def __call__( self : int , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , 
_UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ): if audio is None and audio_target is None: raise ValueError('You must provide either `audio` or `audio_target` values.' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if audio is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) else: _A = None if audio_target is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) if inputs is None: return inputs_target else: _A = inputs_target['input_values'] _A = inputs_target.get('attention_mask' ) if decoder_attention_mask is not None: _A = decoder_attention_mask return inputs def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ): _A = isinstance(_UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _A = is_batched_numpy or ( isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ): _A = np.asarray(_UpperCAmelCase , dtype=np.floataa ) elif isinstance(_UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): _A = speech.astype(np.floataa ) # always return batch if not is_batched: _A = [speech] # needed to make pad() work on spectrogram inputs _A = self.feature_size # convert into correct format for padding if is_target: _A = [self._extract_mel_features(_UpperCAmelCase ) for waveform in speech] _A = BatchFeature({'input_values': features} ) _A = self.num_mel_bins else: _A = BatchFeature({'input_values': speech} ) _A = self.pad( _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , ) _A = feature_size_hack # convert input values to correct format _A = padded_inputs['input_values'] if not isinstance(input_values[0] , np.ndarray ): _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for array in input_values] elif ( not 
isinstance(_UpperCAmelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): _A = [array.astype(np.floataa ) for array in input_values] elif isinstance(_UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): _A = input_values.astype(np.floataa ) # convert attention_mask to correct format _A = padded_inputs.get('attention_mask' ) if attention_mask is not None: _A = [np.asarray(_UpperCAmelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: _A = ( attention_mask if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) _A = self.zero_mean_unit_var_norm( padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value ) if return_tensors is not None: _A = padded_inputs.convert_to_tensors(_UpperCAmelCase ) return padded_inputs def lowerCAmelCase_ ( self : Any ): _A = super().to_dict() # Don't serialize these as they are derived from the other properties. _A = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs'] for name in names: if name in output: del output[name] return output
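
# A small sketch of the zero-mean / unit-variance normalisation applied above
# when do_normalize is set; the random waveform is purely illustrative.
import numpy as np

waveform = np.random.randn(16_000).astype(np.float32)
normed = (waveform - waveform.mean()) / np.sqrt(waveform.var() + 1e-7)
assert abs(float(normed.mean())) < 1e-4
assert abs(float(normed.std()) - 1.0) < 1e-3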
"""simple docstring""" # using dfs for finding eulerian path traversal def _snake_case ( _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : Dict , _snake_case : Tuple=None ) -> List[str]: '''simple docstring''' _A = (path or []) + [u] for v in graph[u]: if visited_edge[u][v] is False: _A , _A = True, True _A = dfs(_snake_case , _snake_case , _snake_case , _snake_case ) return path def _snake_case ( _snake_case : List[Any] , _snake_case : List[Any] ) -> Any: '''simple docstring''' _A = 0 _A = -1 for i in range(_snake_case ): if i not in graph.keys(): continue if len(graph[i] ) % 2 == 1: odd_degree_nodes += 1 _A = i if odd_degree_nodes == 0: return 1, odd_node if odd_degree_nodes == 2: return 2, odd_node return 3, odd_node def _snake_case ( _snake_case : Optional[int] , _snake_case : int ) -> Any: '''simple docstring''' _A = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )] _A , _A = check_circuit_or_path(_snake_case , _snake_case ) if check == 3: print('graph is not Eulerian' ) print('no path' ) return _A = 1 if check == 2: _A = odd_node print('graph has a Euler path' ) if check == 1: print('graph has a Euler cycle' ) _A = dfs(_snake_case , _snake_case , _snake_case ) print(_snake_case ) def _snake_case ( ) -> int: '''simple docstring''' _A = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]} _A = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]} _A = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]} _A = {1: [2, 3], 2: [1, 3], 3: [1, 2]} _A = { 1: [], 2: [] # all degree is zero } _A = 10 check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) if __name__ == "__main__": main()
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : int , _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [] create_all_state(1 , _snake_case , _snake_case , [] , _snake_case ) return result def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : list[int] , _snake_case : list[list[int]] , ) -> None: '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(_snake_case , total_number - level + 2 ): current_list.append(_snake_case ) create_all_state(i + 1 , _snake_case , level - 1 , _snake_case , _snake_case ) current_list.pop() def _snake_case ( _snake_case : list[list[int]] ) -> None: '''simple docstring''' for i in total_list: print(*_snake_case ) if __name__ == "__main__": a = 4 a = 2 a = generate_all_combinations(n, k) print_all_state(total_list)
"""simple docstring""" import qiskit def _snake_case ( _snake_case : int , _snake_case : int ) -> qiskit.result.counts.Counts: '''simple docstring''' _A = qiskit.Aer.get_backend('aer_simulator' ) _A = qiskit.QuantumCircuit(4 , 2 ) # encode inputs in qubits 0 and 1 if bita == 1: qc_ha.x(0 ) if bita == 1: qc_ha.x(1 ) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0 , 2 ) qc_ha.cx(1 , 2 ) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0 , 1 , 3 ) qc_ha.barrier() # extract outputs qc_ha.measure(2 , 0 ) # extract XOR value qc_ha.measure(3 , 1 ) # extract AND value # Execute the circuit on the qasm simulator _A = qiskit.execute(_snake_case , _snake_case , shots=10_00 ) # Return the histogram data of the results of the experiment return job.result().get_counts(_snake_case ) if __name__ == "__main__": a = half_adder(1, 1) print(F'''Half Adder Output Qubit Counts: {counts}''')
"""simple docstring""" def _snake_case ( _snake_case : int = 10_00 ) -> int: '''simple docstring''' return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
"""simple docstring""" import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = ['''image_processor''', '''tokenizer'''] UpperCAmelCase : Optional[int] = '''AutoImageProcessor''' UpperCAmelCase : Any = '''AutoTokenizer''' def __init__( self : Optional[int] , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : int ): _A = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , _UpperCAmelCase , ) _A = kwargs.pop('feature_extractor' ) _A = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(_UpperCAmelCase , _UpperCAmelCase ) _A = self.image_processor _A = False def __call__( self : int , *_UpperCAmelCase : int , **_UpperCAmelCase : Any ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_UpperCAmelCase , **_UpperCAmelCase ) _A = kwargs.pop('images' , _UpperCAmelCase ) _A = kwargs.pop('text' , _UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: _A = args[0] _A = args[1:] if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: _A = self.image_processor(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) if text is not None: _A = self.tokenizer(_UpperCAmelCase , **_UpperCAmelCase ) if text is None: return inputs elif images is None: return encodings else: _A = encodings['input_ids'] return inputs def lowerCAmelCase_ ( self : Union[str, Any] , *_UpperCAmelCase : Any , **_UpperCAmelCase : int ): return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] , *_UpperCAmelCase : int , **_UpperCAmelCase : Dict ): return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase ) @contextmanager def lowerCAmelCase_ ( self : int ): warnings.warn( '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ' 'labels by using the argument `text` of the regular `__call__` method (either in the same call as ' 'your images inputs, or in a separate call.' 
) _A = True _A = self.tokenizer yield _A = self.image_processor _A = False def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict=False , _UpperCAmelCase : int=None ): if added_vocab is None: _A = self.tokenizer.get_added_vocab() _A = {} while tokens: _A = re.search(r'<s_(.*?)>' , _UpperCAmelCase , re.IGNORECASE ) if start_token is None: break _A = start_token.group(1 ) _A = re.search(rF'''</s_{key}>''' , _UpperCAmelCase , re.IGNORECASE ) _A = start_token.group() if end_token is None: _A = tokens.replace(_UpperCAmelCase , '' ) else: _A = end_token.group() _A = re.escape(_UpperCAmelCase ) _A = re.escape(_UpperCAmelCase ) _A = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''' , _UpperCAmelCase , re.IGNORECASE ) if content is not None: _A = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node _A = self.tokenajson(_UpperCAmelCase , is_inner_value=_UpperCAmelCase , added_vocab=_UpperCAmelCase ) if value: if len(_UpperCAmelCase ) == 1: _A = value[0] _A = value else: # leaf nodes _A = [] for leaf in content.split(r'<sep/>' ): _A = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": _A = leaf[1:-2] # for categorical special tokens output[key].append(_UpperCAmelCase ) if len(output[key] ) == 1: _A = output[key][0] _A = tokens[tokens.find(_UpperCAmelCase ) + len(_UpperCAmelCase ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=_UpperCAmelCase , added_vocab=_UpperCAmelCase ) if len(_UpperCAmelCase ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def lowerCAmelCase_ ( self : Optional[Any] ): warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , ) return self.image_processor_class @property def lowerCAmelCase_ ( self : List[Any] ): warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , ) return self.image_processor
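
# Hedged sketch of the tag-to-JSON conversion implemented above (Donut-style
# <s_key>...</s_key> markup assumed; the sequence below is illustrative):
#   '<s_menu><s_name>latte</s_name><s_price>5</s_price></s_menu>'
#   -> {'menu': {'name': 'latte', 'price': '5'}}
# Leaf values separated by <sep/> become lists, and categorical special tokens
# of the form <token/> are stripped back to their bare names.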
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[str] ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple=True ): _A = () for resnet, attn in zip(self.resnets , self.attentions ): _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[Any] ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str]=True ): _A = () for resnet in self.resnets: _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels 
+ res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int]=True ): for resnet in self.resnets: # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Dict ): # there is always at least one resnet _A = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] _A = [] for _ in range(self.num_layers ): _A = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) 
resnets.append(_UpperCAmelCase ) _A = resnets _A = attentions def __call__( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=True ): _A = self.resnets[0](_UpperCAmelCase , _UpperCAmelCase ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) return hidden_states
"""simple docstring""" import numpy class lowercase_ : '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : numpy.ndarray ): _A = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. _A = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. _A = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. _A = numpy.random.rand(3 , 1 ) # Real output values provided. _A = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. _A = numpy.zeros(output_array.shape ) def lowerCAmelCase_ ( self : List[str] ): _A = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def lowerCAmelCase_ ( self : Optional[int] ): _A = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) _A = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) _A = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : bool ): for iteration in range(1 , iterations + 1 ): _A = self.feedforward() self.back_propagation() if give_loss: _A = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F'''Iteration {iteration} Loss: {loss}''' ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : numpy.ndarray ): _A = 
input_arr _A = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return 1 / (1 + numpy.exp(-value )) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return (value) * (1 - (value)) def _snake_case ( ) -> int: '''simple docstring''' _A = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. _A = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. _A = TwoHiddenLayerNeuralNetwork( input_array=_snake_case , output_array=_snake_case ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=_snake_case , iterations=10 , give_loss=_snake_case ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
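# A quick numerical sanity check of the identity the back-propagation above
# relies on: because the layers cache sigmoid activations, sigmoid_derivative
# is applied to the activation value itself, i.e. s'(x) = s(x) * (1 - s(x)).
# Minimal sketch (illustration only, not part of the original module):
import numpy

x = numpy.linspace(-4.0, 4.0, 801)
s = 1 / (1 + numpy.exp(-x))  # sigmoid(x)
# Analytic derivative expressed through the activation vs. a finite-difference check.
print(numpy.allclose(s * (1 - s), numpy.gradient(s, x), atol=1e-3))  # True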
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging a = logging.get_logger(__name__) a = '''▁''' a = {'''vocab_file''': '''sentencepiece.bpe.model'''} a = { '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model''' ), } } a = { '''facebook/nllb-200-distilled-600M''': 1_024, } # fmt: off a = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', 
'''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES UpperCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : Dict = ['''input_ids''', '''attention_mask'''] UpperCAmelCase : List[int] = [] UpperCAmelCase : List[int] = [] def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any]="<s>" , _UpperCAmelCase : List[Any]="</s>" , _UpperCAmelCase : Any="</s>" , _UpperCAmelCase : List[Any]="<s>" , _UpperCAmelCase : List[Any]="<unk>" , _UpperCAmelCase : str="<pad>" , _UpperCAmelCase : List[str]="<mask>" , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : int=None , _UpperCAmelCase : Optional[Dict[str, Any]] = None , _UpperCAmelCase : Any=None , _UpperCAmelCase : List[Any]=False , **_UpperCAmelCase : Dict , ): # Mask token behave like a normal word, i.e. include the space before it _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token _A = {} if sp_model_kwargs is None else sp_model_kwargs _A = legacy_behaviour super().__init__( bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_UpperCAmelCase , **_UpperCAmelCase , ) _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_UpperCAmelCase ) ) _A = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token _A = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _A = 1 _A = len(self.sp_model ) _A = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_UpperCAmelCase ) } _A = {v: k for k, v in self.lang_code_to_id.items()} _A = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) _A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} _A = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) _A = src_lang if src_lang is not None else 'eng_Latn' _A = self.lang_code_to_id[self._src_lang] _A = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : List[str] ): _A = self.__dict__.copy() _A = None _A = self.sp_model.serialized_model_proto() return state def __setstate__( self : Any , _UpperCAmelCase : Union[str, Any] ): _A = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): _A = {} _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def lowerCAmelCase_ ( self : int ): return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def lowerCAmelCase_ ( self : List[str] ): return self._src_lang @src_lang.setter def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : str ): _A = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase ) _A = [1] * len(self.prefix_tokens ) _A = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase_ ( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] , _UpperCAmelCase : Optional[str] , **_UpperCAmelCase : Dict ): if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _A = src_lang _A = self(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) _A = self.convert_tokens_to_ids(_UpperCAmelCase ) _A = tgt_lang_id return inputs def lowerCAmelCase_ ( self : Any ): _A = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : str ): return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Any ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _A = self.sp_model.PieceToId(_UpperCAmelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowerCAmelCase_ ( self : int , _UpperCAmelCase : 
Optional[int] ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Dict ): _A = ''.join(_UpperCAmelCase ).replace(_UpperCAmelCase , ' ' ).strip() return out_string def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): if not os.path.isdir(_UpperCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase , 'wb' ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : str = "eng_Latn" , _UpperCAmelCase : Optional[List[str]] = None , _UpperCAmelCase : str = "fra_Latn" , **_UpperCAmelCase : Any , ): _A = src_lang _A = tgt_lang return super().prepare_seqaseq_batch(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): return self.set_src_lang_special_tokens(self.src_lang ) def lowerCAmelCase_ ( self : str ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : List[Any] ): _A = self.lang_code_to_id[src_lang] if self.legacy_behaviour: _A = [] _A = [self.eos_token_id, self.cur_lang_code] else: _A = [self.cur_lang_code] _A = [self.eos_token_id] def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : str ): _A = self.lang_code_to_id[lang] if self.legacy_behaviour: _A = [] _A = [self.eos_token_id, self.cur_lang_code] else: _A = [self.cur_lang_code] _A = [self.eos_token_id]
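# Typical use of the tokenizer above for translation. The checkpoint name comes
# from the pretrained map in this file; actually running this downloads model
# files from the Hub (sketch, assuming network access):
from transformers import NllbTokenizer

tokenizer = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
inputs = tokenizer("Hello, world!", return_tensors="pt")
# set_src_lang_special_tokens decides whether the language code is a prefix or
# a suffix token, depending on legacy_behaviour.
print(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0]))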
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar a = TypeVar('''T''') class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : T ): _A = data _A = None def __str__( self : str ): return F'''{self.data}''' class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Tuple ): _A = None def __iter__( self : List[Any] ): _A = self.top while node: yield node.data _A = node.next def __str__( self : Union[str, Any] ): return "->".join([str(_UpperCAmelCase ) for item in self] ) def __len__( self : List[Any] ): return len(tuple(iter(self ) ) ) def lowerCAmelCase_ ( self : str ): return self.top is None def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : T ): _A = Node(_UpperCAmelCase ) if not self.is_empty(): _A = self.top _A = node def lowerCAmelCase_ ( self : Dict ): if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , _UpperCAmelCase ) _A = self.top _A = self.top.next return pop_node.data def lowerCAmelCase_ ( self : Tuple ): if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def lowerCAmelCase_ ( self : Optional[Any] ): _A = None if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Any , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ): warnings.warn( 'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ImageGPTImageProcessor instead.' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[str] ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple=True ): _A = () for resnet, attn in zip(self.resnets , self.attentions ): _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[Any] ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str]=True ): _A = () for resnet in self.resnets: _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels 
+ res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int]=True ): for resnet in self.resnets: # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Dict ): # there is always at least one resnet _A = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] _A = [] for _ in range(self.num_layers ): _A = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) 
resnets.append(_UpperCAmelCase ) _A = resnets _A = attentions def __call__( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=True ): _A = self.resnets[0](_UpperCAmelCase , _UpperCAmelCase ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) return hidden_states
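# The up blocks above consume skip connections by concatenating each popped
# residual with the hidden states along the channel axis (the last axis, since
# Flax uses NHWC here). A minimal sketch of just that step, with made-up shapes:
import jax.numpy as jnp

hidden_states = jnp.zeros((1, 16, 16, 320))               # (batch, height, width, channels)
res_hidden_states_tuple = (jnp.zeros((1, 16, 16, 320)),)

res_hidden_states = res_hidden_states_tuple[-1]           # pop the most recent residual
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
print(hidden_states.shape)  # (1, 16, 16, 640) == resnet_in_channels + res_skip_channels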
"""simple docstring""" from __future__ import annotations import collections import pprint from pathlib import Path def _snake_case ( _snake_case : str ) -> str: '''simple docstring''' return "".join(sorted(_snake_case ) ) def _snake_case ( _snake_case : str ) -> list[str]: '''simple docstring''' return word_by_signature[signature(_snake_case )] a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''') a = sorted({word.strip().lower() for word in data.splitlines()}) a = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open('''anagrams.txt''', '''w''') as file: file.write('''all_anagrams = \n ''') file.write(pprint.pformat(all_anagrams))
"""simple docstring""" from scipy.stats import spearmanr import datasets a = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' a = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' a = r'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=False ): _A = spearmanr(_UpperCAmelCase , _UpperCAmelCase ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
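# The metric is a thin wrapper over scipy, so the docstring examples can be
# reproduced directly:
from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(rho)               # -0.7
print(round(pvalue, 2))  # 0.19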
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version a = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : Optional[str] = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The column name of the images in the files.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the training data.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the validation data.'''} ) UpperCAmelCase : Optional[float] = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase_ ( self : Dict ): _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : str = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. 
Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) UpperCAmelCase : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCAmelCase : str = field(default=__lowerCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) UpperCAmelCase : float = field( default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : float = field( default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} ) def _snake_case ( _snake_case : int ) -> Optional[int]: '''simple docstring''' _A = torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ) -> List[str]: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , _snake_case , _snake_case ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(_snake_case ) transformers.utils.logging.set_verbosity(_snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. 
''' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0: _A = ds['train'].train_test_split(data_args.train_val_split ) _A = split['train'] _A = split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: _A = ViTMAEConfig.from_pretrained(model_args.config_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _A = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTImageProcessor() # create model if model_args.model_name_or_path: _A = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) _A = ViTMAEForPreTraining(_snake_case ) if training_args.do_train: _A = ds['train'].column_names else: _A = ds['validation'].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = 'image' elif "img" in column_names: _A = 'img' else: _A = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _A = image_processor.size['shortest_edge'] else: _A = (image_processor.size['height'], image_processor.size['width']) _A = Compose( [ Lambda(lambda _snake_case : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(_snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(_snake_case : List[Any] ): _A = 
[transforms(_snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: _A = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: _A = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_snake_case ) # Compute absolute learning rate _A = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _A = training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer _A = Trainer( model=_snake_case , args=_snake_case , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=_snake_case ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics('eval' , _snake_case ) trainer.save_metrics('eval' , _snake_case ) # Write model card and (optionally) push to hub _A = { 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**_snake_case ) else: trainer.create_model_card(**_snake_case ) def _snake_case ( _snake_case : List[str] ) -> Optional[Any]: '''simple docstring''' main() if __name__ == "__main__": main()
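# One detail worth calling out in the script above: the absolute learning rate
# is derived from base_learning_rate via the linear scaling rule
# (total batch size / 256). With assumed example values:
train_batch_size = 32
gradient_accumulation_steps = 2
world_size = 4
base_learning_rate = 1e-3

total_train_batch_size = train_batch_size * gradient_accumulation_steps * world_size  # 256
learning_rate = base_learning_rate * total_train_batch_size / 256
print(learning_rate)  # 0.001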
"""simple docstring""" import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor a = logging.getLogger(__name__) a = 50 # max width of layer names a = 70 # max width of quantizer names def _snake_case ( _snake_case : int ) -> List[Any]: '''simple docstring''' _A = parser.add_argument_group('quant_trainer arguments' ) group.add_argument('--wprec' , type=_snake_case , default=8 , help='weight precision' ) group.add_argument('--aprec' , type=_snake_case , default=8 , help='activation precision' ) group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' ) group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' ) group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' ) group.add_argument('--quant-disable-keyword' , type=_snake_case , nargs='+' , help='disable quantizers by keyword' ) group.add_argument('--quant-disable-layer-module' , type=_snake_case , help='disable quantizers by keyword under layer.' ) group.add_argument('--quant-enable-layer-module' , type=_snake_case , help='enable quantizers by keyword under layer' ) group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' ) group.add_argument('--percentile' , default=_snake_case , type=_snake_case , help='percentile for PercentileCalibrator' ) group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' ) group.add_argument('--clip-gelu' , metavar='N' , type=_snake_case , help='clip gelu output maximum value to N' ) group.add_argument( '--recalibrate-weights' , action='store_true' , help=( 'recalibrate weight amaxes by taking the max of the weights.' ' amaxes will be computed with the current quantization granularity (axis).' ) , ) def _snake_case ( _snake_case : Dict ) -> Optional[Any]: '''simple docstring''' if args.calibrator == "max": _A = 'max' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('Specify --percentile when using percentile calibrator' ) _A = 'histogram' elif args.calibrator == "mse": _A = 'histogram' else: raise ValueError(F'''Invalid calibrator {args.calibrator}''' ) _A = QuantDescriptor(num_bits=args.aprec , calib_method=_snake_case ) _A = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(_snake_case ) quant_nn.QuantLinear.set_default_quant_desc_weight(_snake_case ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Any=False , _snake_case : Union[str, Any]=False ) -> Optional[int]: '''simple docstring''' logger.info('Configuring Model for Quantization' ) logger.info(F'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(_snake_case , ['embeddings'] , which='weight' , _disabled=_snake_case ) if args.quant_disable: set_quantizer_by_name(_snake_case , [''] , _disabled=_snake_case ) if args.quant_disable_keyword: set_quantizer_by_name(_snake_case , args.quant_disable_keyword , _disabled=_snake_case ) if args.quant_disable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' 
+ args.quant_disable_layer_module] , _disabled=_snake_case ) if args.quant_enable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_snake_case ) if args.recalibrate_weights: recalibrate_weights(_snake_case ) if args.fuse_qkv: fuse_qkv(_snake_case , _snake_case ) if args.clip_gelu: clip_gelu(_snake_case , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str ) -> Any: '''simple docstring''' logger.info('Enabling Calibration' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(F'''{name:80}: {module}''' ) def _snake_case ( _snake_case : List[Any] , _snake_case : List[Any] ) -> str: '''simple docstring''' logger.info('Loading calibrated amax' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('percentile' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str , _snake_case : int ) -> str: '''simple docstring''' def fusea(_snake_case : int , _snake_case : str , _snake_case : Optional[Any] ): for mod in [qq, qk, qv]: if not hasattr(_snake_case , '_amax' ): print(' WARNING: NO AMAX BUFFER' ) return _A = qq._amax.detach().item() _A = qk._amax.detach().item() _A = qv._amax.detach().item() _A = max(_snake_case , _snake_case , _snake_case ) qq._amax.fill_(_snake_case ) qk._amax.fill_(_snake_case ) qv._amax.fill_(_snake_case ) logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('.attention.self' ): logger.info(F'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _snake_case ( _snake_case : int , _snake_case : str ) -> Union[str, Any]: '''simple docstring''' for name, mod in model.named_modules(): if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ): _A = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=_snake_case ) _A = mod._input_quantizer._amax.data.detach().item() logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def _snake_case ( _snake_case : List[str] ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None: _A = mod.weight.shape[0] _A = mod._weight_quantizer._amax.detach() _A = torch.ones(_snake_case , dtype=amax.dtype , device=amax.device ) * amax print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def _snake_case ( _snake_case : Dict ) -> Tuple: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ): if not hasattr(mod.weight_quantizer , '_amax' ): print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' ) continue # determine which axes to reduce across # e.g. 
a 4D tensor quantized per axis 0 should reduce over (1,2,3) _A = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _A = set(range(len(mod.weight.size() ) ) ) - axis_set _A = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_snake_case , keepdims=_snake_case ).detach() logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) _A = amax def _snake_case ( _snake_case : Tuple , _snake_case : List[str]=25 , _snake_case : str=1_80 , _snake_case : int=None ) -> List[Any]: '''simple docstring''' if ignore is None: _A = [] elif not isinstance(_snake_case , _snake_case ): _A = [ignore] _A = 0 for name, mod in model.named_modules(): if not hasattr(_snake_case , 'weight' ): continue _A = max(_snake_case , len(_snake_case ) ) for name, mod in model.named_modules(): _A = getattr(_snake_case , '_input_quantizer' , _snake_case ) _A = getattr(_snake_case , '_weight_quantizer' , _snake_case ) if not hasattr(_snake_case , 'weight' ): continue if type(_snake_case ) in ignore: continue if [True for s in ignore if type(_snake_case ) is str and s in name]: continue _A = F'''Act:{input_q.extra_repr()}''' _A = F'''Wgt:{weight_q.extra_repr()}''' _A = F'''{name:{name_width}} {act_str} {wgt_str}''' if len(_snake_case ) <= line_width: logger.info(_snake_case ) else: logger.info(F'''{name:{name_width}} {act_str}''' ) logger.info(F'''{" ":{name_width}} {wgt_str}''' ) def _snake_case ( _snake_case : Dict ) -> int: '''simple docstring''' _A = 0 for name, mod in model.named_modules(): if isinstance(_snake_case , pytorch_quantization.nn.TensorQuantizer ): print(F'''{name:80} {mod}''' ) count += 1 print(F'''{count} TensorQuantizers found in model''' ) def _snake_case ( _snake_case : str , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : Any ) -> int: '''simple docstring''' _A = getattr(_snake_case , _snake_case , _snake_case ) if quantizer_mod is not None: assert hasattr(_snake_case , _snake_case ) setattr(_snake_case , _snake_case , _snake_case ) else: logger.warning(F'''{name} has no {quantizer}''' ) def _snake_case ( _snake_case : Dict , _snake_case : Optional[int] , _snake_case : str="both" , **_snake_case : List[Any] ) -> str: '''simple docstring''' _A = F'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' if which in ["input", "both"]: set_quantizer(_snake_case , _snake_case , '_input_quantizer' , _snake_case , _snake_case ) if which in ["weight", "both"]: set_quantizer(_snake_case , _snake_case , '_weight_quantizer' , _snake_case , _snake_case ) logger.info(_snake_case ) def _snake_case ( _snake_case : Any , _snake_case : int , **_snake_case : Dict ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_input_quantizer' ) or hasattr(_snake_case , '_weight_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): set_quantizers(_snake_case , _snake_case , **_snake_case ) elif name.endswith('_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): _A = F'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' setattr(_snake_case , _snake_case , _snake_case ) logger.info(_snake_case )
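# recalibrate_weights reduces |w| over every axis except the quantization axis
# to get one amax per quantized slice. A plain-torch sketch of that reduction
# (illustration only; it does not use pytorch_quantization's reduce_amax helper):
import torch

weight = torch.randn(64, 32, 3, 3)  # e.g. a conv weight quantized per output channel (axis 0)
axis_set = {0}
reduce_dims = tuple(set(range(weight.dim())) - axis_set)
amax = weight.abs().amax(dim=reduce_dims, keepdim=True)
print(amax.shape)  # torch.Size([64, 1, 1, 1]) -- one scale per output channel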
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) a = { '''configuration_speecht5''': [ '''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''', '''SpeechT5Config''', '''SpeechT5HifiGanConfig''', ], '''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''], '''processing_speecht5''': ['''SpeechT5Processor'''], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['''SpeechT5Tokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ '''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SpeechT5ForSpeechToText''', '''SpeechT5ForSpeechToSpeech''', '''SpeechT5ForTextToSpeech''', '''SpeechT5Model''', '''SpeechT5PreTrainedModel''', '''SpeechT5HifiGan''', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from scipy.stats import spearmanr import datasets a = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' a = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' a = r'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=False ): _A = spearmanr(_UpperCAmelCase , _UpperCAmelCase ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) a = { '''configuration_mobilebert''': [ '''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileBertConfig''', '''MobileBertOnnxConfig''', ], '''tokenization_mobilebert''': ['''MobileBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['''MobileBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ '''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MobileBertForMaskedLM''', '''MobileBertForMultipleChoice''', '''MobileBertForNextSentencePrediction''', '''MobileBertForPreTraining''', '''MobileBertForQuestionAnswering''', '''MobileBertForSequenceClassification''', '''MobileBertForTokenClassification''', '''MobileBertLayer''', '''MobileBertModel''', '''MobileBertPreTrainedModel''', '''load_tf_weights_in_mobilebert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ '''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFMobileBertForMaskedLM''', '''TFMobileBertForMultipleChoice''', '''TFMobileBertForNextSentencePrediction''', '''TFMobileBertForPreTraining''', '''TFMobileBertForQuestionAnswering''', '''TFMobileBertForSequenceClassification''', '''TFMobileBertForTokenClassification''', '''TFMobileBertMainLayer''', '''TFMobileBertModel''', '''TFMobileBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mobilebert import ( MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertOnnxConfig, ) from .tokenization_mobilebert import MobileBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mobilebert_fast import MobileBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from collections.abc import Callable def _snake_case ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) -> float: '''simple docstring''' _A = a _A = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('could not find root in given interval.' ) else: _A = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: _A = mid else: _A = mid _A = start + (end - start) / 2.0 return mid def _snake_case ( _snake_case : float ) -> float: '''simple docstring''' return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_000)) import doctest doctest.testmod()
"""simple docstring""" a = [ '''Audio''', '''Array2D''', '''Array3D''', '''Array4D''', '''Array5D''', '''ClassLabel''', '''Features''', '''Sequence''', '''Value''', '''Image''', '''Translation''', '''TranslationVariableLanguages''', ] from .audio import Audio from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages
"""simple docstring""" import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(__lowerCAmelCase ) , '''Tatoeba directory does not exist.''' ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : Optional[Any] ): _A = tempfile.mkdtemp() return TatoebaConverter(save_dir=_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Optional[int] ): self.resolver.convert_models(['heb-eng'] ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): _A , _A = self.resolver.write_model_card('opus-mt-he-en' , dry_run=_UpperCAmelCase ) assert mmeta["long_pair"] == "heb-eng"
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a = { '''configuration_blenderbot''': [ '''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlenderbotConfig''', '''BlenderbotOnnxConfig''', ], '''tokenization_blenderbot''': ['''BlenderbotTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['''BlenderbotTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ '''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlenderbotForCausalLM''', '''BlenderbotForConditionalGeneration''', '''BlenderbotModel''', '''BlenderbotPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ '''TFBlenderbotForConditionalGeneration''', '''TFBlenderbotModel''', '''TFBlenderbotPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ '''FlaxBlenderbotForConditionalGeneration''', '''FlaxBlenderbotModel''', '''FlaxBlenderbotPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
"""simple docstring""" from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''microsoft/xprophetnet-large-wiki100-cased''': ( '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json''' ), } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Any = '''xlm-prophetnet''' UpperCAmelCase : List[str] = ['''past_key_values'''] UpperCAmelCase : str = { '''num_attention_heads''': '''num_encoder_attention_heads''', } def __init__( self : str , _UpperCAmelCase : Optional[float] = 0.1 , _UpperCAmelCase : Optional[Union[str, Callable]] = "gelu" , _UpperCAmelCase : Optional[int] = 30_522 , _UpperCAmelCase : Optional[int] = 1_024 , _UpperCAmelCase : Optional[int] = 4_096 , _UpperCAmelCase : Optional[int] = 12 , _UpperCAmelCase : Optional[int] = 16 , _UpperCAmelCase : Optional[int] = 4_096 , _UpperCAmelCase : Optional[int] = 12 , _UpperCAmelCase : Optional[int] = 16 , _UpperCAmelCase : Optional[float] = 0.1 , _UpperCAmelCase : Optional[float] = 0.1 , _UpperCAmelCase : Optional[int] = 512 , _UpperCAmelCase : Optional[float] = 0.02 , _UpperCAmelCase : Optional[bool] = True , _UpperCAmelCase : Optional[bool] = True , _UpperCAmelCase : Optional[int] = 0 , _UpperCAmelCase : Optional[int] = 2 , _UpperCAmelCase : Optional[int] = 32 , _UpperCAmelCase : Optional[int] = 128 , _UpperCAmelCase : Optional[bool] = False , _UpperCAmelCase : Optional[float] = 0.0 , _UpperCAmelCase : Optional[bool] = True , _UpperCAmelCase : Optional[int] = 0 , _UpperCAmelCase : Optional[int] = 1 , _UpperCAmelCase : Optional[int] = 2 , **_UpperCAmelCase : Optional[Any] , ): _A = vocab_size _A = hidden_size _A = encoder_ffn_dim _A = num_encoder_layers _A = num_encoder_attention_heads _A = decoder_ffn_dim _A = num_decoder_layers _A = num_decoder_attention_heads _A = max_position_embeddings _A = init_std # Normal(0, this parameter) _A = activation_function # parameters for xlmprophetnet _A = ngram _A = num_buckets _A = relative_max_distance _A = disable_ngram_loss _A = eps # 3 Types of Dropout _A = attention_dropout _A = activation_dropout _A = dropout _A = use_cache super().__init__( pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , add_cross_attention=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , ) @property def lowerCAmelCase_ ( self : Any ): return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : List[Any] ): raise NotImplementedError( 'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and' ' `num_decoder_layers`.' )
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : tuple[int, int] , _snake_case : int ) -> list[tuple[int, int]]: '''simple docstring''' _A , _A = position _A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] _A = [] for position in positions: _A , _A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(_snake_case ) return permissible_positions def _snake_case ( _snake_case : list[list[int]] ) -> bool: '''simple docstring''' return not any(elem == 0 for row in board for elem in row ) def _snake_case ( _snake_case : list[list[int]] , _snake_case : tuple[int, int] , _snake_case : int ) -> bool: '''simple docstring''' if is_complete(_snake_case ): return True for position in get_valid_pos(_snake_case , len(_snake_case ) ): _A , _A = position if board[y][x] == 0: _A = curr + 1 if open_knight_tour_helper(_snake_case , _snake_case , curr + 1 ): return True _A = 0 return False def _snake_case ( _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [[0 for i in range(_snake_case )] for j in range(_snake_case )] for i in range(_snake_case ): for j in range(_snake_case ): _A = 1 if open_knight_tour_helper(_snake_case , (i, j) , 1 ): return board _A = 0 _A = F'''Open Kight Tour cannot be performed on a board of size {n}''' raise ValueError(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" def _snake_case ( _snake_case : str , _snake_case : str ) -> bool: '''simple docstring''' _A = len(_snake_case ) _A = len(_snake_case ) _A = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _A = True for i in range(_snake_case ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _A = True if a[i].islower(): _A = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : int , *_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : List[Any] ): super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) _A = eval_examples _A = post_process_function def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str = "eval" ): _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(_UpperCAmelCase ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) else: _A = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_UpperCAmelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCAmelCase ) return metrics def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str = "test" ): _A = self.get_test_dataloader(_UpperCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. 
_A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions , 'predict' ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCAmelCase )
"""simple docstring""" a = '''Tobias Carryer''' from time import time class lowercase_ : '''simple docstring''' def __init__( self : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple=int(time() ) ): # noqa: B008 _A = multiplier _A = increment _A = modulo _A = seed def lowerCAmelCase_ ( self : Dict ): _A = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. a = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31) while True: print(lcg.next_number())
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ) -> bool: '''simple docstring''' return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ..utils import _LazyModule a = { '''config''': [ '''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''', '''OnnxConfig''', '''OnnxConfigWithPast''', '''OnnxSeq2SeqConfigWithPast''', '''PatchingSpec''', ], '''convert''': ['''export''', '''validate_model_outputs'''], '''features''': ['''FeaturesManager'''], '''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) _A = Vector() def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(_UpperCAmelCase ) , '(0,0,0,0,0,1)' ) def lowerCAmelCase_ ( self : Optional[int] ): _A = Vector([1, 2, 3, 4] ) self.assertEqual(len(_UpperCAmelCase ) , 4 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2] ) _A = Vector([1, 2, 3, 4, 5] ) _A = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) _A = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def lowerCAmelCase_ ( self : str ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) _A = Vector([2, -1, 4] ) # for test of dot product _A = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' ) self.assertEqual((a * b) , 0 ) def lowerCAmelCase_ ( self : Dict ): self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 ) def lowerCAmelCase_ ( self : Tuple ): self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , _UpperCAmelCase , _UpperCAmelCase ) ) , '(3,4,7)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 0, 0, 0, 0, 0] ) _A = x.copy() self.assertEqual(str(_UpperCAmelCase ) , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(_UpperCAmelCase ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : str ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) _A = Vector([1, 2, 3] ) self.assertEqual('(14,32,50)' , str(a * x ) ) self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) 
a.change_component(0 , 2 , 5 ) self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : List[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) ) def lowerCAmelCase_ ( self : int ): self.assertEqual( '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
"""simple docstring""" from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class lowercase_ : '''simple docstring''' UpperCAmelCase : str = LEDConfig UpperCAmelCase : Optional[int] = {} UpperCAmelCase : Union[str, Any] = '''gelu''' def __init__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : List[Any]=13 , _UpperCAmelCase : Dict=7 , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : str=99 , _UpperCAmelCase : int=32 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : int=4 , _UpperCAmelCase : Optional[Any]=37 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Any=20 , _UpperCAmelCase : str=2 , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Dict=0 , _UpperCAmelCase : List[str]=4 , ): _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = eos_token_id _A = pad_token_id _A = bos_token_id _A = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after _A = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests _A = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def lowerCAmelCase_ ( self : Dict ): _A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _A = tf.concat([input_ids, eos_tensor] , axis=1 ) _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) _A = prepare_led_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _A = tf.concat( [tf.zeros_like(_UpperCAmelCase )[:, :-1], tf.ones_like(_UpperCAmelCase )[:, -1:]] , axis=-1 , ) _A = global_attention_mask return config, inputs_dict def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str ): _A = TFLEDModel(config=_UpperCAmelCase ).get_decoder() _A = inputs_dict['input_ids'] _A = input_ids[:1, :] _A = inputs_dict['attention_mask'][:1, :] _A = 1 # first forward pass _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase ) _A , _A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _A = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _A = tf.concat([input_ids, next_tokens] , axis=-1 ) _A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0] _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _A = output_from_no_past[:, -3:, random_slice_idx] _A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-3 ) def _snake_case ( _snake_case : int , _snake_case : Tuple , _snake_case : Optional[Any] , _snake_case : Tuple=None , _snake_case : Dict=None , _snake_case : List[str]=None , _snake_case : Any=None , ) -> Optional[int]: '''simple docstring''' if attention_mask is None: _A = tf.cast(tf.math.not_equal(_snake_case , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, 
"decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Tuple = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () UpperCAmelCase : Optional[Any] = (TFLEDForConditionalGeneration,) if is_tf_available() else () UpperCAmelCase : List[Any] = ( { '''conversational''': TFLEDForConditionalGeneration, '''feature-extraction''': TFLEDModel, '''summarization''': TFLEDForConditionalGeneration, '''text2text-generation''': TFLEDForConditionalGeneration, '''translation''': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) UpperCAmelCase : Dict = True UpperCAmelCase : Any = False UpperCAmelCase : Optional[Any] = False UpperCAmelCase : Optional[int] = False def lowerCAmelCase_ ( self : Optional[Any] ): _A = TFLEDModelTester(self ) _A = ConfigTester(self , config_class=_UpperCAmelCase ) def lowerCAmelCase_ ( self : int ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Tuple ): _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : str ): _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = tf.zeros_like(inputs_dict['attention_mask'] ) _A = 2 _A = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , ) _A = True _A = self.model_tester.seq_length _A = self.model_tester.encoder_seq_length def check_decoder_attentions_output(_UpperCAmelCase : str ): _A = outputs.decoder_attentions self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(_UpperCAmelCase : Union[str, Any] ): _A = [t.numpy() for t in outputs.encoder_attentions] _A = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: _A = True _A = False _A = False _A = model_class(_UpperCAmelCase ) _A = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) _A = len(_UpperCAmelCase ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) if self.is_encoder_decoder: _A = model_class(_UpperCAmelCase ) _A = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_decoder_attentions_output(_UpperCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _A = True _A = model_class(_UpperCAmelCase ) _A = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) # Check attention is always last and order is fine _A = True _A = True _A = 
model_class(_UpperCAmelCase ) _A = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) @unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' ) def lowerCAmelCase_ ( self : Union[str, Any] ): pass def lowerCAmelCase_ ( self : str ): # TODO: Head-masking not yet implement pass def _snake_case ( _snake_case : List[Any] ) -> Optional[Any]: '''simple docstring''' return tf.constant(_snake_case , dtype=tf.intaa ) a = 1e-4 @slow @require_tf class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Union[str, Any] ): _A = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led # change to intended input here _A = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _A = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _A = prepare_led_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase ) _A = model(**_UpperCAmelCase )[0] _A = (1, 1_024, 768) self.assertEqual(output.shape , _UpperCAmelCase ) # change to expected output here _A = tf.convert_to_tensor( [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , ) tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1E-3 ) def lowerCAmelCase_ ( self : List[Any] ): _A = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ) # change to intended input here _A = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _A = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _A = prepare_led_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase ) _A = model(**_UpperCAmelCase )[0] _A = (1, 1_024, model.config.vocab_size) self.assertEqual(output.shape , _UpperCAmelCase ) # change to expected output here _A = tf.convert_to_tensor( [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , ) tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1E-3 , rtol=1E-3 )
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''', } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : int = '''xlnet''' UpperCAmelCase : List[Any] = ['''mems'''] UpperCAmelCase : Any = { '''n_token''': '''vocab_size''', # Backward compatibility '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , _UpperCAmelCase : Dict=32_000 , _UpperCAmelCase : List[str]=1_024 , _UpperCAmelCase : Any=24 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : Union[str, Any]=4_096 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Any=True , _UpperCAmelCase : str="bi" , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1E-1_2 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=512 , _UpperCAmelCase : Dict=None , _UpperCAmelCase : int=True , _UpperCAmelCase : int=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : int=-1 , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Union[str, Any]="last" , _UpperCAmelCase : int=True , _UpperCAmelCase : str="tanh" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Optional[Any]=5 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Dict=2 , **_UpperCAmelCase : int , ): _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( 'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`' ' instead.' , _UpperCAmelCase , ) _A = kwargs['use_cache'] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) @property def lowerCAmelCase_ ( self : Tuple ): logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] ): # Message copied from Transformer-XL documentation raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowercase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any]=7 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : Dict=18 , _UpperCAmelCase : Union[str, Any]=30 , _UpperCAmelCase : Any=400 , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Dict=True , ): _A = size if size is not None else {'height': 18, 'width': 18} _A = parent _A = batch_size _A = num_channels _A = image_size _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = apply_ocr def lowerCAmelCase_ ( self : Optional[int] ): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : List[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def lowerCAmelCase_ ( self : int ): _A = LayoutLMvaImageProcessingTester(self ) @property def lowerCAmelCase_ ( self : int ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : Any ): _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'apply_ocr' ) ) def lowerCAmelCase_ ( self : int ): _A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 18} ) _A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) def lowerCAmelCase_ ( self : List[Any] ): pass def lowerCAmelCase_ ( self : Union[str, Any] ): # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='pt' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , _UpperCAmelCase ) self.assertIsInstance(encoding.boxes , _UpperCAmelCase ) # Test batched _A = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def lowerCAmelCase_ ( self : Optional[int] ): # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase ) for image in 
image_inputs: self.assertIsInstance(_UpperCAmelCase , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _A = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def lowerCAmelCase_ ( self : Union[str, Any] ): # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _A = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def lowerCAmelCase_ ( self : List[Any] ): # with apply_OCR = True _A = LayoutLMvaImageProcessor() from datasets import load_dataset _A = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' ) _A = Image.open(ds[0]['file'] ).convert('RGB' ) _A = image_processing(_UpperCAmelCase , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 _A = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 
'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 _A = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 
638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , _UpperCAmelCase ) self.assertListEqual(encoding.boxes , _UpperCAmelCase ) # with apply_OCR = False _A = LayoutLMvaImageProcessor(apply_ocr=_UpperCAmelCase ) _A = image_processing(_UpperCAmelCase , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed a = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def _snake_case ( _snake_case : Tuple ) -> Dict: '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def _snake_case ( _snake_case : str , _snake_case : List[Any] ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False elif args.student_type == "gpt2": _A = False def _snake_case ( _snake_case : str , _snake_case : int ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False def _snake_case ( ) -> Tuple: '''simple docstring''' _A = argparse.ArgumentParser(description='Training' ) parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' ) parser.add_argument( '--dump_path' , type=_snake_case , required=_snake_case , help='The output directory (log, checkpoints, parameters, etc.)' ) parser.add_argument( '--data_file' , type=_snake_case , required=_snake_case , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , ) parser.add_argument( '--student_type' , type=_snake_case , choices=['distilbert', 'roberta', 'gpt2'] , required=_snake_case , help='The student type (DistilBERT, RoBERTa).' , ) parser.add_argument('--student_config' , type=_snake_case , required=_snake_case , help='Path to the student configuration.' ) parser.add_argument( '--student_pretrained_weights' , default=_snake_case , type=_snake_case , help='Load student initialization checkpoint.' ) parser.add_argument( '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=_snake_case , help='Teacher type (BERT, RoBERTa).' ) parser.add_argument('--teacher_name' , type=_snake_case , required=_snake_case , help='The teacher model.' ) parser.add_argument('--temperature' , default=2.0 , type=_snake_case , help='Temperature for the softmax temperature.' 
) parser.add_argument( '--alpha_ce' , default=0.5 , type=_snake_case , help='Linear weight for the distillation loss. Must be >=0.' ) parser.add_argument( '--alpha_mlm' , default=0.0 , type=_snake_case , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , ) parser.add_argument('--alpha_clm' , default=0.5 , type=_snake_case , help='Linear weight for the CLM loss. Must be >=0.' ) parser.add_argument('--alpha_mse' , default=0.0 , type=_snake_case , help='Linear weight of the MSE loss. Must be >=0.' ) parser.add_argument( '--alpha_cos' , default=0.0 , type=_snake_case , help='Linear weight of the cosine embedding loss. Must be >=0.' ) parser.add_argument( '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' ) parser.add_argument( '--mlm_mask_prop' , default=0.15 , type=_snake_case , help='Proportion of tokens for which we need to make a prediction.' , ) parser.add_argument('--word_mask' , default=0.8 , type=_snake_case , help='Proportion of tokens to mask out.' ) parser.add_argument('--word_keep' , default=0.1 , type=_snake_case , help='Proportion of tokens to keep.' ) parser.add_argument('--word_rand' , default=0.1 , type=_snake_case , help='Proportion of tokens to randomly replace.' ) parser.add_argument( '--mlm_smoothing' , default=0.7 , type=_snake_case , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , ) parser.add_argument('--token_counts' , type=_snake_case , help='The token counts in the data_file for MLM.' ) parser.add_argument( '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , ) parser.add_argument( '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , ) parser.add_argument( '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , ) parser.add_argument('--n_epoch' , type=_snake_case , default=3 , help='Number of pass on the whole dataset.' ) parser.add_argument('--batch_size' , type=_snake_case , default=5 , help='Batch size (for each process).' ) parser.add_argument( '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , ) parser.add_argument( '--gradient_accumulation_steps' , type=_snake_case , default=50 , help='Gradient accumulation for larger training batches.' , ) parser.add_argument('--warmup_prop' , default=0.05 , type=_snake_case , help='Linear warmup proportion.' ) parser.add_argument('--weight_decay' , default=0.0 , type=_snake_case , help='Weight decay if we apply some.' ) parser.add_argument('--learning_rate' , default=5E-4 , type=_snake_case , help='The initial learning rate for Adam.' ) parser.add_argument('--adam_epsilon' , default=1E-6 , type=_snake_case , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , default=5.0 , type=_snake_case , help='Max gradient norm.' ) parser.add_argument('--initializer_range' , default=0.02 , type=_snake_case , help='Random initialization range.' 
) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=_snake_case , default='O1' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_gpu' , type=_snake_case , default=1 , help='Number of GPUs in the node.' ) parser.add_argument('--local_rank' , type=_snake_case , default=-1 , help='Distributed training - Local rank' ) parser.add_argument('--seed' , type=_snake_case , default=56 , help='Random seed' ) parser.add_argument('--log_interval' , type=_snake_case , default=5_00 , help='Tensorboard logging interval.' ) parser.add_argument('--checkpoint_interval' , type=_snake_case , default=40_00 , help='Checkpoint interval.' ) _A = parser.parse_args() sanity_checks(_snake_case ) # ARGS # init_gpu_params(_snake_case ) set_seed(_snake_case ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' ' itUse `--force` if you want to overwrite it' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f: json.dump(vars(_snake_case ) , _snake_case , indent=4 ) git_log(args.dump_path ) _A , _A , _A = MODEL_CLASSES[args.student_type] _A , _A , _A = MODEL_CLASSES[args.teacher_type] # TOKENIZER # _A = teacher_tokenizer_class.from_pretrained(args.teacher_name ) _A = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): _A = tokenizer.all_special_tokens.index(_snake_case ) _A = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) _A = special_tok_ids _A = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , 'rb' ) as fp: _A = pickle.load(_snake_case ) if args.mlm: logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , 'rb' ) as fp: _A = pickle.load(_snake_case ) _A = np.maximum(_snake_case , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): _A = 0.0 # do not predict special tokens _A = torch.from_numpy(_snake_case ) else: _A = None _A = LmSeqsDataset(params=_snake_case , data=_snake_case ) logger.info('Data loader created.' ) # STUDENT # logger.info(F'''Loading student config from {args.student_config}''' ) _A = student_config_class.from_pretrained(args.student_config ) _A = True if args.student_pretrained_weights is not None: logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' ) _A = student_model_class.from_pretrained(args.student_pretrained_weights , config=_snake_case ) else: _A = student_model_class(_snake_case ) if args.n_gpu > 0: student.to(F'''cuda:{args.local_rank}''' ) logger.info('Student loaded.' 
) # TEACHER # _A = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_snake_case ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_snake_case , _snake_case ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_snake_case , _snake_case ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() _A = Distiller( params=_snake_case , dataset=_snake_case , token_probs=_snake_case , student=_snake_case , teacher=_snake_case ) distiller.train() logger.info('Let\'s go get some drinks.' ) if __name__ == "__main__": main()
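# A minimal sketch (model name is illustrative) of how the MODEL_CLASSES
# lookup above drives setup: a single architecture key yields matching
# config, model, and tokenizer classes for teacher/student loading.
from transformers import BertConfig, BertForMaskedLM, BertTokenizer

classes = {"bert": (BertConfig, BertForMaskedLM, BertTokenizer)}
config_cls, model_cls, tokenizer_cls = classes["bert"]
tokenizer = tokenizer_cls.from_pretrained("bert-base-uncased")
teacher = model_cls.from_pretrained("bert-base-uncased", output_hidden_states=True)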
"""simple docstring""" import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def _snake_case ( _snake_case : str , _snake_case : Optional[int] , _snake_case : Dict , _snake_case : Any ) -> Union[str, Any]: '''simple docstring''' if isinstance(_snake_case , _snake_case ): _A = np.full((len(_snake_case ), sequence_length, 2) , _snake_case ) else: _A = np.full((len(_snake_case ), sequence_length) , _snake_case ) for i, tensor in enumerate(_snake_case ): if padding_side == "right": if isinstance(_snake_case , _snake_case ): _A = tensor[:sequence_length] else: _A = tensor[:sequence_length] else: if isinstance(_snake_case , _snake_case ): _A = tensor[:sequence_length] else: _A = tensor[:sequence_length] return out_tensor.tolist() def _snake_case ( _snake_case : str ) -> str: '''simple docstring''' _A = ord(_snake_case ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26): return True _A = unicodedata.category(_snake_case ) if cat.startswith('P' ): return True return False @dataclass class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : PreTrainedTokenizerBase UpperCAmelCase : Union[bool, str, PaddingStrategy] = True UpperCAmelCase : Optional[int] = None UpperCAmelCase : Optional[int] = None UpperCAmelCase : int = -100 UpperCAmelCase : str = "pt" def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : List[Any] ): import torch _A = 'label' if 'label' in features[0].keys() else 'labels' _A = [feature[label_name] for feature in features] if label_name in features[0].keys() else None _A = self.tokenizer.pad( _UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' if labels is None else None , ) if labels is None: return batch _A = torch.tensor(batch['entity_ids'] ).shape[1] _A = self.tokenizer.padding_side if padding_side == "right": _A = [ list(_UpperCAmelCase ) + [self.label_pad_token_id] * (sequence_length - len(_UpperCAmelCase )) for label in labels ] else: _A = [ [self.label_pad_token_id] * (sequence_length - len(_UpperCAmelCase )) + list(_UpperCAmelCase ) for label in labels ] _A = [feature['ner_tags'] for feature in features] _A = padding_tensor(_UpperCAmelCase , -1 , _UpperCAmelCase , _UpperCAmelCase ) _A = [feature['original_entity_spans'] for feature in features] _A = padding_tensor(_UpperCAmelCase , (-1, -1) , _UpperCAmelCase , _UpperCAmelCase ) _A = {k: torch.tensor(_UpperCAmelCase , dtype=torch.intaa ) for k, v in batch.items()} return batch
"""simple docstring""" from manim import * class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Dict ): _A = Rectangle(height=0.5 , width=0.5 ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _A = Rectangle(height=0.25 , width=0.25 ) _A = [mem.copy() for i in range(6 )] _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('CPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(4 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('GPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Model' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(_UpperCAmelCase ) _A = [] _A = [] for i, rect in enumerate(_UpperCAmelCase ): _A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 ) target.move_to(_UpperCAmelCase ) model_arr.append(_UpperCAmelCase ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(_UpperCAmelCase ) self.add(*_UpperCAmelCase , *_UpperCAmelCase ) _A = [meta_mem.copy() for i in range(6 )] _A = [meta_mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Disk' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) disk.move_to([-4, -1.25, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _A = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(_UpperCAmelCase ) _A = MarkupText( F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase ) ) _A = Square(0.3 ) input.set_fill(_UpperCAmelCase , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 ) self.play(Write(_UpperCAmelCase ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 ) self.play(MoveToTarget(_UpperCAmelCase ) ) self.play(FadeOut(_UpperCAmelCase ) ) _A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 ) a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 ) model_cpu_arr[0].generate_target() 
model_cpu_arr[0].target.move_to(gpu_rect[0] ) _A = MarkupText( F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) ) _A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) _A = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) _A = AnimationGroup( FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 ) self.play(_UpperCAmelCase ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: _A = 0.7 self.play( Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) _A = a_c _A = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , ) _A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) ) self.wait()
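# To render the scene above with the manim community edition (file name and
# scene class name are placeholders for whatever this file is saved as), a
# typical low-quality preview invocation would be:
#
#   manim -pql <file>.py <SceneClassName>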
"""simple docstring""" import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def _snake_case ( _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : List[str] ) -> str: '''simple docstring''' _A = RemBertConfig.from_json_file(_snake_case ) print('Building PyTorch model from configuration: {}'.format(str(_snake_case ) ) ) _A = RemBertModel(_snake_case ) # Load weights from tf checkpoint load_tf_weights_in_rembert(_snake_case , _snake_case , _snake_case ) # Save pytorch-model print('Save PyTorch model to {}'.format(_snake_case ) ) torch.save(model.state_dict() , _snake_case ) if __name__ == "__main__": a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--rembert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained RemBERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) a = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
"""simple docstring""" def _snake_case ( _snake_case : list , _snake_case : int = 0 ) -> list: '''simple docstring''' _A = length or len(_snake_case ) _A = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: _A , _A = list_data[i + 1], list_data[i] _A = True return list_data if not swapped else bubble_sort(_snake_case , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mobilebert import MobileBertTokenizer a = logging.get_logger(__name__) a = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} a = { '''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''}, '''tokenizer_file''': { '''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json''' }, } a = {'''mobilebert-uncased''': 512} a = {} class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES UpperCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : str = PRETRAINED_INIT_CONFIGURATION UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : Dict = MobileBertTokenizer def __init__( self : Tuple , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Union[str, Any]="[UNK]" , _UpperCAmelCase : Union[str, Any]="[SEP]" , _UpperCAmelCase : Any="[PAD]" , _UpperCAmelCase : Optional[Any]="[CLS]" , _UpperCAmelCase : List[Any]="[MASK]" , _UpperCAmelCase : int=True , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : List[Any] , ): super().__init__( _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenize_chinese_chars=_UpperCAmelCase , strip_accents=_UpperCAmelCase , **_UpperCAmelCase , ) _A = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _UpperCAmelCase ) != do_lower_case or normalizer_state.get('strip_accents' , _UpperCAmelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _UpperCAmelCase ) != tokenize_chinese_chars ): _A = getattr(_UpperCAmelCase , normalizer_state.pop('type' ) ) _A = do_lower_case _A = strip_accents _A = tokenize_chinese_chars _A = normalizer_class(**_UpperCAmelCase ) _A = do_lower_case def lowerCAmelCase_ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Any=None ): _A = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): _A = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase ) return tuple(_UpperCAmelCase )
"""simple docstring""" import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Any = ['''input_values''', '''attention_mask'''] def __init__( self : Dict , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 16_000 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 80 , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : str = "hann_window" , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 7_600 , _UpperCAmelCase : float = 1E-1_0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , **_UpperCAmelCase : List[Any] , ): super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase ) _A = do_normalize _A = return_attention_mask _A = num_mel_bins _A = hop_length _A = win_length _A = win_function _A = frame_signal_scale _A = fmin _A = fmax _A = mel_floor _A = reduction_factor _A = win_length * sampling_rate // 1_000 _A = hop_length * sampling_rate // 1_000 _A = optimal_fft_length(self.sample_size ) _A = (self.n_fft // 2) + 1 _A = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase ) _A = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , ) if frame_signal_scale != 1.0: warnings.warn( 'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) if reduction_factor != 2.0: warnings.warn( 'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def lowerCAmelCase_ ( _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : float = 0.0 ): if attention_mask is not None: _A = np.array(_UpperCAmelCase , np.intaa ) _A = [] for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1 ) ): _A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: _A = padding_value normed_input_values.append(_UpperCAmelCase ) else: _A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , ): _A = spectrogram( _UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , ) return log_mel_spec.T def __call__( self : int , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , 
_UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ): if audio is None and audio_target is None: raise ValueError('You must provide either `audio` or `audio_target` values.' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if audio is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) else: _A = None if audio_target is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) if inputs is None: return inputs_target else: _A = inputs_target['input_values'] _A = inputs_target.get('attention_mask' ) if decoder_attention_mask is not None: _A = decoder_attention_mask return inputs def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ): _A = isinstance(_UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _A = is_batched_numpy or ( isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ): _A = np.asarray(_UpperCAmelCase , dtype=np.floataa ) elif isinstance(_UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): _A = speech.astype(np.floataa ) # always return batch if not is_batched: _A = [speech] # needed to make pad() work on spectrogram inputs _A = self.feature_size # convert into correct format for padding if is_target: _A = [self._extract_mel_features(_UpperCAmelCase ) for waveform in speech] _A = BatchFeature({'input_values': features} ) _A = self.num_mel_bins else: _A = BatchFeature({'input_values': speech} ) _A = self.pad( _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , ) _A = feature_size_hack # convert input values to correct format _A = padded_inputs['input_values'] if not isinstance(input_values[0] , np.ndarray ): _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for array in input_values] elif ( not 
isinstance(_UpperCAmelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): _A = [array.astype(np.floataa ) for array in input_values] elif isinstance(_UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): _A = input_values.astype(np.floataa ) # convert attention_mask to correct format _A = padded_inputs.get('attention_mask' ) if attention_mask is not None: _A = [np.asarray(_UpperCAmelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: _A = ( attention_mask if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) _A = self.zero_mean_unit_var_norm( padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value ) if return_tensors is not None: _A = padded_inputs.convert_to_tensors(_UpperCAmelCase ) return padded_inputs def lowerCAmelCase_ ( self : Any ): _A = super().to_dict() # Don't serialize these as they are derived from the other properties. _A = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs'] for name in names: if name in output: del output[name] return output
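# A hedged usage sketch; upstream this class ships as SpeechT5FeatureExtractor,
# which the snippet below assumes. Raw mono audio goes in, padded
# input_values come out; passing audio_target instead yields log-mel labels.
import numpy as np
from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()
waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence
inputs = extractor(audio=waveform, sampling_rate=16_000, return_tensors="np")
print(inputs["input_values"].shape)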
"""simple docstring""" # Algorithm for the pigeonhole sorting def _snake_case ( _snake_case : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _A = min(_snake_case ) # min() finds the minimum value _A = max(_snake_case ) # max() finds the maximum value _A = max_val - min_val + 1 # size is difference of max and min values plus one # list of pigeonholes of size equal to the variable size _A = [0] * size # Populate the pigeonholes. for x in a: assert isinstance(_snake_case , _snake_case ), "integers only please" holes[x - min_val] += 1 # Putting the elements back into the array in an order. _A = 0 for count in range(_snake_case ): while holes[count] > 0: holes[count] -= 1 _A = count + min_val i += 1 def _snake_case ( ) -> Dict: '''simple docstring''' _A = [8, 3, 2, 7, 4, 6, 8] pigeonhole_sort(_snake_case ) print('Sorted order is:' , ' '.join(_snake_case ) ) if __name__ == "__main__": main()
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : int , _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [] create_all_state(1 , _snake_case , _snake_case , [] , _snake_case ) return result def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : list[int] , _snake_case : list[list[int]] , ) -> None: '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(_snake_case , total_number - level + 2 ): current_list.append(_snake_case ) create_all_state(i + 1 , _snake_case , level - 1 , _snake_case , _snake_case ) current_list.pop() def _snake_case ( _snake_case : list[list[int]] ) -> None: '''simple docstring''' for i in total_list: print(*_snake_case ) if __name__ == "__main__": a = 4 a = 2 a = generate_all_combinations(n, k) print_all_state(total_list)
"""simple docstring""" import unittest import numpy as np import torch from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : str = DDIMPipeline UpperCAmelCase : Dict = UNCONDITIONAL_IMAGE_GENERATION_PARAMS UpperCAmelCase : Tuple = PipelineTesterMixin.required_optional_params - { '''num_images_per_prompt''', '''latents''', '''callback''', '''callback_steps''', } UpperCAmelCase : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS UpperCAmelCase : Dict = False def lowerCAmelCase_ ( self : Optional[Any] ): torch.manual_seed(0 ) _A = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) _A = DDIMScheduler() _A = {'unet': unet, 'scheduler': scheduler} return components def lowerCAmelCase_ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : str=0 ): if str(_UpperCAmelCase ).startswith('mps' ): _A = torch.manual_seed(_UpperCAmelCase ) else: _A = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) _A = { 'batch_size': 1, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def lowerCAmelCase_ ( self : Tuple ): _A = 'cpu' _A = self.get_dummy_components() _A = self.pipeline_class(**_UpperCAmelCase ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) _A = self.get_dummy_inputs(_UpperCAmelCase ) _A = pipe(**_UpperCAmelCase ).images _A = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 32, 32, 3) ) _A = np.array( [1.0_0_0E0_0, 5.7_1_7E-0_1, 4.7_1_7E-0_1, 1.0_0_0E0_0, 0.0_0_0E0_0, 1.0_0_0E0_0, 3.0_0_0E-0_4, 0.0_0_0E0_0, 9.0_0_0E-0_4] ) _A = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_UpperCAmelCase , 1E-3 ) def lowerCAmelCase_ ( self : List[str] ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def lowerCAmelCase_ ( self : List[str] ): super().test_save_load_local(expected_max_difference=3E-3 ) def lowerCAmelCase_ ( self : Optional[Any] ): super().test_save_load_optional_components(expected_max_difference=3E-3 ) def lowerCAmelCase_ ( self : Tuple ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : int ): _A = 'google/ddpm-cifar10-32' _A = UNetaDModel.from_pretrained(_UpperCAmelCase ) _A = DDIMScheduler() _A = DDIMPipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase ) ddim.to(_UpperCAmelCase ) ddim.set_progress_bar_config(disable=_UpperCAmelCase ) _A = torch.manual_seed(0 ) _A = ddim(generator=_UpperCAmelCase , eta=0.0 , output_type='numpy' ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _A = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCAmelCase_ ( self : Optional[int] ): _A = 'google/ddpm-ema-bedroom-256' _A = UNetaDModel.from_pretrained(_UpperCAmelCase ) _A = 
DDIMScheduler.from_pretrained(_UpperCAmelCase ) _A = DDIMPipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase ) ddpm.to(_UpperCAmelCase ) ddpm.set_progress_bar_config(disable=_UpperCAmelCase ) _A = torch.manual_seed(0 ) _A = ddpm(generator=_UpperCAmelCase , output_type='numpy' ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) _A = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
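# Minimal sketch mirroring the slow CIFAR-10 test above, outside the test
# harness (downloads google/ddpm-cifar10-32 on first use):
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
image = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images[0]
print(image.shape)  # (32, 32, 3)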
"""simple docstring""" def _snake_case ( _snake_case : int = 10_00 ) -> int: '''simple docstring''' return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
"""simple docstring""" import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel a = { '''gwf-440k''': { '''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''', '''sample_rate''': 48_000, '''sample_size''': 65_536, }, '''jmann-small-190k''': { '''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''', '''sample_rate''': 48_000, '''sample_size''': 65_536, }, '''jmann-large-580k''': { '''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''', '''sample_rate''': 48_000, '''sample_size''': 131_072, }, '''maestro-uncond-150k''': { '''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''', '''sample_rate''': 16_000, '''sample_size''': 65_536, }, '''unlocked-uncond-250k''': { '''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''', '''sample_rate''': 16_000, '''sample_size''': 65_536, }, '''honk-140k''': { '''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''', '''sample_rate''': 16_000, '''sample_size''': 65_536, }, } def _snake_case ( _snake_case : List[str] , _snake_case : Optional[Any] ) -> str: '''simple docstring''' return torch.atana(_snake_case , _snake_case ) / math.pi * 2 def _snake_case ( _snake_case : Optional[int] ) -> Optional[int]: '''simple docstring''' _A = torch.sin(t * math.pi / 2 ) ** 2 _A = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(_snake_case , _snake_case ) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' pass class lowercase_ ( nn.Module ): '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : str ): super().__init__() _A = DiffusionAttnUnetaD(_UpperCAmelCase , n_attn_layers=4 ) _A = deepcopy(self.diffusion ) _A = torch.quasirandom.SobolEngine(1 , scramble=_UpperCAmelCase ) def _snake_case ( _snake_case : List[str] ) -> str: '''simple docstring''' _A = MODELS_MAP[model_name]['url'] os.system(F'''wget {url} ./''' ) return F'''./{model_name}.ckpt''' a = { '''1''': '''resnets.0''', '''2''': '''attentions.0''', '''3''': '''resnets.1''', '''4''': '''attentions.1''', '''5''': '''resnets.2''', '''6''': '''attentions.2''', } a = { '''8''': '''resnets.0''', '''9''': '''attentions.0''', '''10''': '''resnets.1''', '''11''': '''attentions.1''', '''12''': '''resnets.2''', '''13''': '''attentions.2''', } a = { '''1''': '''resnets.0''', '''2''': '''attentions.0''', '''3''': '''resnets.1''', '''4''': '''attentions.1''', '''5''': '''resnets.2''', '''6''': '''attentions.2''', '''8''': '''resnets.3''', '''9''': '''attentions.3''', '''10''': '''resnets.4''', '''11''': '''attentions.4''', '''12''': '''resnets.5''', '''13''': '''attentions.5''', } a = { '''0''': '''resnets.0''', '''1''': '''resnets.1''', '''2''': '''resnets.2''', '''4''': '''resnets.0''', '''5''': '''resnets.1''', '''6''': '''resnets.2''', } a = { '''skip''': '''conv_skip''', '''main.0''': '''conv_1''', '''main.1''': '''group_norm_1''', '''main.3''': '''conv_2''', '''main.4''': '''group_norm_2''', } a = { '''norm''': '''group_norm''', '''qkv_proj''': ['''query''', '''key''', '''value'''], '''out_proj''': ['''proj_attn'''], } def _snake_case ( _snake_case : Dict ) -> Optional[Any]: '''simple docstring''' if name.startswith('skip' ): return name.replace('skip' , RES_CONV_MAP['skip'] ) # name has to be of format main.{digit} if not name.startswith('main.' 
): raise ValueError(F'''ResConvBlock error with {name}''' ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def _snake_case ( _snake_case : Any ) -> Optional[int]: '''simple docstring''' for key, value in ATTN_MAP.items(): if name.startswith(_snake_case ) and not isinstance(_snake_case , _snake_case ): return name.replace(_snake_case , _snake_case ) elif name.startswith(_snake_case ): return [name.replace(_snake_case , _snake_case ) for v in value] raise ValueError(F'''Attn error with {name}''' ) def _snake_case ( _snake_case : List[Any] , _snake_case : Optional[Any]=13 ) -> int: '''simple docstring''' _A = input_string if string.split('.' )[0] == "timestep_embed": return string.replace('timestep_embed' , 'time_proj' ) _A = 0 if string.startswith('net.3.' ): depth += 1 _A = string[6:] elif string.startswith('net.' ): _A = string[4:] while string.startswith('main.7.' ): depth += 1 _A = string[7:] if string.startswith('main.' ): _A = string[5:] # mid block if string[:2].isdigit(): _A = string[:2] _A = string[2:] else: _A = string[0] _A = string[1:] if depth == max_depth: _A = MID_NUM_TO_LAYER[layer_num] _A = 'mid_block' elif depth > 0 and int(_snake_case ) < 7: _A = DOWN_NUM_TO_LAYER[layer_num] _A = F'''down_blocks.{depth}''' elif depth > 0 and int(_snake_case ) > 7: _A = UP_NUM_TO_LAYER[layer_num] _A = F'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: _A = DEPTH_0_TO_LAYER[layer_num] _A = F'''up_blocks.{max_depth - 1}''' if int(_snake_case ) > 3 else 'down_blocks.0' if not string_left.startswith('.' ): raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' ) _A = string_left[1:] if "resnets" in new_layer: _A = convert_resconv_naming(_snake_case ) elif "attentions" in new_layer: _A = convert_attn_naming(_snake_case ) _A = new_string_left if not isinstance(_snake_case , _snake_case ): _A = prefix + '.' + new_layer + '.' + string_left else: _A = [prefix + '.' + new_layer + '.' + s for s in string_left] return new_string def _snake_case ( _snake_case : str ) -> List[str]: '''simple docstring''' _A = {} for k, v in state_dict.items(): if k.endswith('kernel' ): # up- and downsample layers, don't have trainable weights continue _A = rename(_snake_case ) # check if we need to transform from Conv => Linear for attention if isinstance(_snake_case , _snake_case ): _A = transform_conv_attns(_snake_case , _snake_case , _snake_case ) else: _A = v return new_state_dict def _snake_case ( _snake_case : List[Any] , _snake_case : str , _snake_case : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' if len(_snake_case ) == 1: if len(v.shape ) == 3: # weight _A = v[:, :, 0] else: # bias _A = v else: # qkv matrices _A = v.shape[0] _A = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: _A = v[i * single_shape : (i + 1) * single_shape, :, 0] else: _A = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def _snake_case ( _snake_case : str ) -> Dict: '''simple docstring''' _A = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) _A = args.model_path.split('/' )[-1].split('.' 
)[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' _A = download(_snake_case ) _A = MODELS_MAP[model_name]['sample_rate'] _A = MODELS_MAP[model_name]['sample_size'] _A = Object() _A = sample_size _A = sample_rate _A = 0 _A = UNetaDModel(sample_size=_snake_case , sample_rate=_snake_case ) _A = diffusers_model.state_dict() _A = DiffusionUncond(_snake_case ) orig_model.load_state_dict(torch.load(args.model_path , map_location=_snake_case )['state_dict'] ) _A = orig_model.diffusion_ema.eval() _A = orig_model.state_dict() _A = rename_orig_weights(_snake_case ) _A = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) _A = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(_snake_case ) == 0, F'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith('kernel' ) for k in list(_snake_case ) ), F'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}''' if key == "time_proj.weight": _A = value.squeeze() _A = value diffusers_model.load_state_dict(_snake_case ) _A = 1_00 _A = 33 _A = IPNDMScheduler(num_train_timesteps=_snake_case ) _A = torch.manual_seed(_snake_case ) _A = torch.randn([1, 2, config.sample_size] , generator=_snake_case ).to(_snake_case ) _A = torch.linspace(1 , 0 , steps + 1 , device=_snake_case )[:-1] _A = get_crash_schedule(_snake_case ) _A = DanceDiffusionPipeline(unet=_snake_case , scheduler=_snake_case ) _A = torch.manual_seed(33 ) _A = pipe(num_inference_steps=_snake_case , generator=_snake_case ).audios _A = sampling.iplms_sample(_snake_case , _snake_case , _snake_case , {} ) _A = generated.clamp(-1 , 1 ) _A = (generated - audio).abs().sum() _A = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print('Diff sum' , _snake_case ) print('Diff max' , _snake_case ) assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/''' print(F'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": a = argparse.ArgumentParser() parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''') parser.add_argument( '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.''' ) parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''') a = parser.parse_args() main(args)
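# Hypothetical invocation of the conversion script above (the output path is
# a placeholder; the model name must be one of the MODELS_MAP keys):
#
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path gwf-440k \
#       --checkpoint_path ./dance-diffusion-converted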
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[str] ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple=True ): _A = () for resnet, attn in zip(self.resnets , self.attentions ): _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[Any] ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str]=True ): _A = () for resnet in self.resnets: _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels 
+ res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int]=True ): for resnet in self.resnets: # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Dict ): # there is always at least one resnet _A = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] _A = [] for _ in range(self.num_layers ): _A = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) 
resnets.append(_UpperCAmelCase ) _A = resnets _A = attentions def __call__( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=True ): _A = self.resnets[0](_UpperCAmelCase , _UpperCAmelCase ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) return hidden_states
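# Note on the up blocks above: each resnet consumes the concatenation of the
# current hidden states and the matching skip connection popped from
# res_hidden_states_tuple, so its input width is out_channels (or
# prev_output_channel for the first resnet) plus res_skip_channels.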
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : str = '''vit_msn''' def __init__( self : Optional[Any] , _UpperCAmelCase : List[str]=768 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : List[str]=3_072 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : int=1E-0_6 , _UpperCAmelCase : Tuple=224 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Dict=True , **_UpperCAmelCase : Any , ): super().__init__(**_UpperCAmelCase ) _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = initializer_range _A = layer_norm_eps _A = image_size _A = patch_size _A = num_channels _A = qkv_bias
"""simple docstring""" import numpy class lowercase_ : '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : numpy.ndarray ): _A = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. _A = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. _A = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. _A = numpy.random.rand(3 , 1 ) # Real output values provided. _A = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. _A = numpy.zeros(output_array.shape ) def lowerCAmelCase_ ( self : List[str] ): _A = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def lowerCAmelCase_ ( self : Optional[int] ): _A = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) _A = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) _A = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : bool ): for iteration in range(1 , iterations + 1 ): _A = self.feedforward() self.back_propagation() if give_loss: _A = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F'''Iteration {iteration} Loss: {loss}''' ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : numpy.ndarray ): _A = 
input_arr _A = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return 1 / (1 + numpy.exp(-value )) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return (value) * (1 - (value)) def _snake_case ( ) -> int: '''simple docstring''' _A = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. _A = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. _A = TwoHiddenLayerNeuralNetwork( input_array=_snake_case , output_array=_snake_case ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=_snake_case , iterations=10 , give_loss=_snake_case ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
315
1
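Note that sigmoid_derivative above takes the activation sigmoid(x) as its argument rather than x itself. A small self-contained check of that convention against a finite-difference reference:

import numpy

def sigmoid(value):
    return 1 / (1 + numpy.exp(-value))

def sigmoid_derivative(value):
    # NOTE: expects the *activation* sigmoid(x), not x itself.
    return value * (1 - value)

x = numpy.linspace(-4, 4, 9)
s = sigmoid(x)
# Central finite difference of sigmoid at x as an independent reference.
h = 1e-6
numeric = (sigmoid(x + h) - sigmoid(x - h)) / (2 * h)
assert numpy.allclose(sigmoid_derivative(s), numeric, atol=1e-8)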
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : int , _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [] create_all_state(1 , _snake_case , _snake_case , [] , _snake_case ) return result def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : list[int] , _snake_case : list[list[int]] , ) -> None: '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(_snake_case , total_number - level + 2 ): current_list.append(_snake_case ) create_all_state(i + 1 , _snake_case , level - 1 , _snake_case , _snake_case ) current_list.pop() def _snake_case ( _snake_case : list[list[int]] ) -> None: '''simple docstring''' for i in total_list: print(*_snake_case ) if __name__ == "__main__": a = 4 a = 2 a = generate_all_combinations(n, k) print_all_state(total_list)
315
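A quick property test of the recursion above, rewritten compactly so it runs on its own; itertools.combinations over 1..n serves as the reference:

from itertools import combinations

def all_combinations(n: int, k: int) -> list[list[int]]:
    # Compact reimplementation of the recursion above, for a self-contained check.
    result: list[list[int]] = []

    def walk(start: int, level: int, current: list[int]) -> None:
        if level == 0:
            result.append(current[:])
            return
        for i in range(start, n - level + 2):
            current.append(i)
            walk(i + 1, level - 1, current)
            current.pop()

    walk(1, k, [])
    return result

n, k = 5, 3
expected = [list(c) for c in combinations(range(1, n + 1), k)]
assert all_combinations(n, k) == expected
print(len(expected))  # C(5, 3) = 10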
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar a = TypeVar('''T''') class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : T ): _A = data _A = None def __str__( self : str ): return F'''{self.data}''' class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Tuple ): _A = None def __iter__( self : List[Any] ): _A = self.top while node: yield node.data _A = node.next def __str__( self : Union[str, Any] ): return "->".join([str(_UpperCAmelCase ) for item in self] ) def __len__( self : List[Any] ): return len(tuple(iter(self ) ) ) def lowerCAmelCase_ ( self : str ): return self.top is None def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : T ): _A = Node(_UpperCAmelCase ) if not self.is_empty(): _A = self.top _A = node def lowerCAmelCase_ ( self : Dict ): if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , _UpperCAmelCase ) _A = self.top _A = self.top.next return pop_node.data def lowerCAmelCase_ ( self : Tuple ): if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def lowerCAmelCase_ ( self : Optional[Any] ): _A = None if __name__ == "__main__": from doctest import testmod testmod()
315
1
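A short usage sketch of the stack above (assuming it runs in the same module so LinkedStack is in scope):

stack = LinkedStack[int]()
for value in (1, 2, 3):
    stack.push(value)

print(stack)             # 3->2->1  (top of the stack first)
print(len(stack))        # 3
print(stack.peek())      # 3
print(stack.pop())       # 3
print(stack.is_empty())  # False
stack.clear()
print(stack.is_empty())  # True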
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) a = logging.getLogger(__name__) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCAmelCase : bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Whether tp freeze the encoder.'''} ) UpperCAmelCase : bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} ) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : str = field( metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} ) UpperCAmelCase : Optional[str] = field( default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , ) UpperCAmelCase : Optional[int] = field( default=1024 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCAmelCase : Optional[int] = field( default=128 , metadata={ '''help''': ( '''The maximum total sequence length for target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCAmelCase : Optional[int] = field( default=142 , metadata={ '''help''': ( '''The maximum total sequence length for validation target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded. ''' '''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ''' '''during ``evaluate`` and ``predict``.''' ) } , ) UpperCAmelCase : Optional[int] = field( default=142 , metadata={ '''help''': ( '''The maximum total sequence length for test target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCAmelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} ) UpperCAmelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# validation examples. 
-1 means use all.'''} ) UpperCAmelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''Source language id for translation.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''Target language id for translation.'''} ) UpperCAmelCase : Optional[int] = field(default=__lowerCAmelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , ) def _snake_case ( _snake_case : Optional[int] , _snake_case : int , _snake_case : Optional[Any] ) -> str: '''simple docstring''' logger.info(F'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(F''' {key} = {metrics[key]}''' ) save_json(_snake_case , os.path.join(_snake_case , F'''{split}_results.json''' ) ) def _snake_case ( ) -> Optional[int]: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() check_output_dir(_snake_case ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('Training/evaluation parameters %s' , _snake_case ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_A = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _A = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(_snake_case , _snake_case , _snake_case ): assert hasattr(_snake_case , _snake_case ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(_snake_case , _snake_case , getattr(_snake_case , _snake_case ) ) _A = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _A = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=_snake_case , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(_snake_case , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: _A = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(_snake_case , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(_snake_case , _snake_case ): _A = tokenizer.lang_code_to_id[data_args.tgt_lang] else: _A = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(_snake_case ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) _A = SeqaSeqDataset # Get datasets _A = ( dataset_class( _snake_case , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_train else None ) _A = ( dataset_class( _snake_case , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) _A = ( dataset_class( _snake_case , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_predict else None ) # Initialize our Trainer _A = ( build_compute_metrics_fn(data_args.task , _snake_case ) if training_args.predict_with_generate else None ) _A = SeqaSeqTrainer( model=_snake_case , args=_snake_case , data_args=_snake_case , train_dataset=_snake_case , eval_dataset=_snake_case , data_collator=SeqaSeqDataCollator( _snake_case , _snake_case , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_snake_case , tokenizer=_snake_case , ) _A = {} # Training if training_args.do_train: logger.info('*** Train ***' ) _A = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) _A = train_result.metrics _A = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('train' , _snake_case , training_args.output_dir ) all_metrics.update(_snake_case ) # Need to save the state, since Trainer.save_model saves only the tokenizer with 
the model trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) _A = trainer.evaluate(metric_key_prefix='val' ) _A = data_args.n_val _A = round(metrics['val_loss'] , 4 ) if trainer.is_world_process_zero(): handle_metrics('val' , _snake_case , training_args.output_dir ) all_metrics.update(_snake_case ) if training_args.do_predict: logger.info('*** Predict ***' ) _A = trainer.predict(test_dataset=_snake_case , metric_key_prefix='test' ) _A = test_output.metrics _A = data_args.n_test if trainer.is_world_process_zero(): _A = round(metrics['test_loss'] , 4 ) handle_metrics('test' , _snake_case , training_args.output_dir ) all_metrics.update(_snake_case ) if training_args.predict_with_generate: _A = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case ) _A = lmap(str.strip , _snake_case ) write_txt_file(_snake_case , os.path.join(training_args.output_dir , 'test_generations.txt' ) ) if trainer.is_world_process_zero(): save_json(_snake_case , os.path.join(training_args.output_dir , 'all_results.json' ) ) return all_metrics def _snake_case ( _snake_case : List[str] ) -> Optional[Any]: '''simple docstring''' main() if __name__ == "__main__": main()
315
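The script above parses its three argument dataclasses with HfArgumentParser. A minimal standalone sketch of that parsing pattern, with a hypothetical DemoArguments dataclass standing in for the real argument classes:

from dataclasses import dataclass, field

from transformers import HfArgumentParser

@dataclass
class DemoArguments:
    # Hypothetical fields, just to illustrate the pattern used by the script.
    model_name_or_path: str = field(metadata={'help': 'Path or hub id of the model.'})
    n_train: int = field(default=-1, metadata={'help': '# training examples. -1 means use all.'})

parser = HfArgumentParser(DemoArguments)
# Passing args explicitly instead of reading sys.argv, so the sketch is reproducible.
(demo_args,) = parser.parse_args_into_dataclasses(args=['--model_name_or_path', 't5-small'])
print(demo_args.model_name_or_path, demo_args.n_train)  # t5-small -1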
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Any , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ): warnings.warn( 'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ImageGPTImageProcessor instead.' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
315
1
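The file above is a pure deprecation shim: the old class name inherits all behaviour and only adds a warning. The same pattern in isolation, with hypothetical class names:

import warnings

class NewProcessor:
    def __init__(self, scale: float = 1.0) -> None:
        self.scale = scale

class OldFeatureExtractor(NewProcessor):
    # Legacy alias: behaves exactly like NewProcessor but warns on construction.
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'OldFeatureExtractor is deprecated; use NewProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    legacy = OldFeatureExtractor(scale=2.0)

print(legacy.scale)                 # 2.0 -- behaviour is inherited unchanged
print(caught[0].category.__name__)  # FutureWarning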
"""simple docstring""" from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch a = logging.get_logger(__name__) @add_end_docstrings( __lowerCAmelCase , r''' top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). ''' , ) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : GenericTensor ): if self.framework == "tf": _A = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": _A = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_UpperCAmelCase ) else: raise ValueError('Unsupported framework' ) return masked_index def lowerCAmelCase_ ( self : str , _UpperCAmelCase : GenericTensor ): _A = self.get_masked_index(_UpperCAmelCase ) _A = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( 'fill-mask' , self.model.base_model_prefix , F'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , ) def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : GenericTensor ): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['input_ids'][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Tuple=None , **_UpperCAmelCase : Tuple ): if return_tensors is None: _A = self.framework _A = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase ) self.ensure_exactly_one_mask_token(_UpperCAmelCase ) return model_inputs def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : List[Any] ): _A = self.model(**_UpperCAmelCase ) _A = model_inputs['input_ids'] return model_outputs def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int=5 , _UpperCAmelCase : str=None ): # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: _A = target_ids.shape[0] _A = model_outputs['input_ids'][0] _A = model_outputs['logits'] if self.framework == "tf": _A = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] _A = outputs.numpy() _A = outputs[0, masked_index, :] _A = stable_softmax(_UpperCAmelCase , axis=-1 ) if target_ids is not None: _A = tf.gather_nd(tf.squeeze(_UpperCAmelCase , 0 ) , target_ids.reshape(-1 , 1 ) ) _A = tf.expand_dims(_UpperCAmelCase , 0 ) _A = tf.math.top_k(_UpperCAmelCase , k=_UpperCAmelCase ) _A , _A = topk.values.numpy(), topk.indices.numpy() else: _A = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_UpperCAmelCase ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample _A = outputs[0, masked_index, :] _A = logits.softmax(dim=-1 ) if target_ids is not None: _A = probs[..., target_ids] _A , _A = probs.topk(_UpperCAmelCase ) _A = [] _A = values.shape[0] == 1 for i, (_values, _predictions) in 
enumerate(zip(values.tolist() , predictions.tolist() ) ): _A = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place _A = input_ids.numpy().copy() if target_ids is not None: _A = target_ids[p].tolist() _A = p # Filter padding out: _A = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back _A = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) _A = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence} row.append(_UpperCAmelCase ) result.append(_UpperCAmelCase ) if single_mask: return result[0] return result def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=None ): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): _A = [targets] try: _A = self.tokenizer.get_vocab() except Exception: _A = {} _A = [] for target in targets: _A = vocab.get(_UpperCAmelCase , _UpperCAmelCase ) if id_ is None: _A = self.tokenizer( _UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , max_length=1 , truncation=_UpperCAmelCase , )['input_ids'] if len(_UpperCAmelCase ) == 0: logger.warning( F'''The specified target token `{target}` does not exist in the model vocabulary. ''' 'We cannot replace it with anything meaningful, ignoring it' ) continue _A = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( F'''The specified target token `{target}` does not exist in the model vocabulary. ''' F'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' ) target_ids.append(id_ ) _A = list(set(_UpperCAmelCase ) ) if len(_UpperCAmelCase ) == 0: raise ValueError('At least one target must be provided when passed.' ) _A = np.array(_UpperCAmelCase ) return target_ids def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Any=None ): _A = {} if targets is not None: _A = self.get_target_ids(_UpperCAmelCase , _UpperCAmelCase ) _A = target_ids if top_k is not None: _A = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( 'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' ) return {}, {}, postprocess_params def __call__( self : List[Any] , _UpperCAmelCase : str , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : str ): _A = super().__call__(_UpperCAmelCase , **_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) == 1: return outputs[0] return outputs
315
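From the caller's side, the pipeline implemented above is reached through transformers.pipeline; a brief sketch (the model checkpoint is illustrative):

from transformers import pipeline

unmasker = pipeline('fill-mask', model='distilbert-base-uncased')
for prediction in unmasker('Paris is the [MASK] of France.', top_k=2):
    print(prediction['token_str'], round(prediction['score'], 3))

# Restricting scoring to explicit candidates, via the `targets` argument
# documented in the pipeline's docstring above:
print(unmasker('Paris is the [MASK] of France.', targets=['capital', 'heart']))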
"""simple docstring""" from __future__ import annotations import collections import pprint from pathlib import Path def _snake_case ( _snake_case : str ) -> str: '''simple docstring''' return "".join(sorted(_snake_case ) ) def _snake_case ( _snake_case : str ) -> list[str]: '''simple docstring''' return word_by_signature[signature(_snake_case )] a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''') a = sorted({word.strip().lower() for word in data.splitlines()}) a = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open('''anagrams.txt''', '''w''') as file: file.write('''all_anagrams = \n ''') file.write(pprint.pformat(all_anagrams))
315
1
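The same signature-grouping idea on an inline word list, so it runs without a words.txt file:

import collections

words = ['listen', 'silent', 'enlist', 'google', 'banana']
groups = collections.defaultdict(list)
for word in words:
    # Two words are anagrams iff their sorted letters match.
    groups[''.join(sorted(word))].append(word)

anagram_sets = {sig: ws for sig, ws in groups.items() if len(ws) > 1}
print(anagram_sets)  # {'eilnst': ['listen', 'silent', 'enlist']}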
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness a = '''\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } ''' a = '''\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). ''' a = ''' Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric("code_eval") >>> test_cases = ["assert add(2,3)==5"] >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {\'pass@1\': 0.5, \'pass@2\': 1.0} ''' a = ''' ################################################################################ !!!WARNING!!! ################################################################################ The "code_eval" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). 
Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this with: >>> import os >>> os.environ["HF_ALLOW_CODE_EVAL"] = "1" ################################################################################\ ''' a = '''The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self : int ): return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' ) ), 'references': datasets.Value('string' ), } ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=[1, 10, 100] , _UpperCAmelCase : Tuple=4 , _UpperCAmelCase : int=3.0 ): if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError('This metric is currently not supported on Windows.' 
) with ThreadPoolExecutor(max_workers=_UpperCAmelCase ) as executor: _A = [] _A = Counter() _A = 0 _A = defaultdict(_UpperCAmelCase ) for task_id, (candidates, test_case) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase ) ): for candidate in candidates: _A = candidate + '\n' + test_case _A = (test_program, timeout, task_id, completion_id[task_id]) _A = executor.submit(_UpperCAmelCase , *_UpperCAmelCase ) futures.append(_UpperCAmelCase ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(_UpperCAmelCase ): _A = future.result() results[result["task_id"]].append((result['completion_id'], result) ) _A , _A = [], [] for result in results.values(): result.sort() _A = [r[1]['passed'] for r in result] total.append(len(_UpperCAmelCase ) ) correct.append(sum(_UpperCAmelCase ) ) _A = np.array(_UpperCAmelCase ) _A = np.array(_UpperCAmelCase ) _A = k _A = {F'''pass@{k}''': estimate_pass_at_k(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _snake_case ( _snake_case : str , _snake_case : Optional[Any] , _snake_case : Tuple ) -> int: '''simple docstring''' def estimator(_snake_case : int , _snake_case : int , _snake_case : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(_snake_case , _snake_case ): _A = itertools.repeat(_snake_case , len(_snake_case ) ) else: assert len(_snake_case ) == len(_snake_case ) _A = iter(_snake_case ) return np.array([estimator(int(_snake_case ) , int(_snake_case ) , _snake_case ) for n, c in zip(_snake_case , _snake_case )] )
315
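The estimator at the bottom of the metric computes the unbiased pass@k from n generated samples of which c pass the tests. A standalone numeric check of its product form against the closed form 1 - C(n-c, k)/C(n, k):

from math import comb

import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    # Same unbiased estimator as `estimator` above, in product form.
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

n, c = 10, 3  # 10 samples, 3 correct
for k in (1, 5, 10):
    closed_form = 1.0 - comb(n - c, k) / comb(n, k) if n - c >= k else 1.0
    assert abs(pass_at_k(n, c, k) - closed_form) < 1e-12
    print(f'pass@{k} = {pass_at_k(n, c, k):.4f}')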
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version a = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : Optional[str] = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The column name of the images in the files.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the training data.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the validation data.'''} ) UpperCAmelCase : Optional[float] = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase_ ( self : Dict ): _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : str = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. 
Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) UpperCAmelCase : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCAmelCase : str = field(default=__lowerCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) UpperCAmelCase : float = field( default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : float = field( default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} ) def _snake_case ( _snake_case : int ) -> Optional[int]: '''simple docstring''' _A = torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ) -> List[str]: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , _snake_case , _snake_case ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(_snake_case ) transformers.utils.logging.set_verbosity(_snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. 
''' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0: _A = ds['train'].train_test_split(data_args.train_val_split ) _A = split['train'] _A = split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: _A = ViTMAEConfig.from_pretrained(model_args.config_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _A = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTImageProcessor() # create model if model_args.model_name_or_path: _A = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) _A = ViTMAEForPreTraining(_snake_case ) if training_args.do_train: _A = ds['train'].column_names else: _A = ds['validation'].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = 'image' elif "img" in column_names: _A = 'img' else: _A = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _A = image_processor.size['shortest_edge'] else: _A = (image_processor.size['height'], image_processor.size['width']) _A = Compose( [ Lambda(lambda _snake_case : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(_snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(_snake_case : List[Any] ): _A = 
[transforms(_snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: _A = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: _A = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_snake_case ) # Compute absolute learning rate _A = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _A = training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer _A = Trainer( model=_snake_case , args=_snake_case , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=_snake_case ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics('eval' , _snake_case ) trainer.save_metrics('eval' , _snake_case ) # Write model card and (optionally) push to hub _A = { 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**_snake_case ) else: trainer.create_model_card(**_snake_case ) def _snake_case ( _snake_case : List[str] ) -> Optional[Any]: '''simple docstring''' main() if __name__ == "__main__": main()
315
1
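The collate_fn above only stacks per-example tensors into one batch tensor; a tiny sketch with fake image tensors:

import torch

def collate_fn(examples):
    # Mirrors the collator above: stack per-example tensors into one batch.
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    return {'pixel_values': pixel_values}

# Two fake 3x224x224 images stand in for dataset rows.
batch = collate_fn([{'pixel_values': torch.randn(3, 224, 224)} for _ in range(2)])
print(batch['pixel_values'].shape)  # torch.Size([2, 3, 224, 224])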
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class lowercase_ ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self : Optional[Any] ): _A = XLMRobertaModel.from_pretrained('xlm-roberta-base' ) _A = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] ) # The dog is cute and lives in the garden house _A = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim _A = torch.tensor( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): _A = model(_UpperCAmelCase )['last_hidden_state'].detach() self.assertEqual(output.shape , _UpperCAmelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCAmelCase , atol=1E-3 ) ) @slow def lowerCAmelCase_ ( self : List[Any] ): _A = XLMRobertaModel.from_pretrained('xlm-roberta-large' ) _A = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] ) # The dog is cute and lives in the garden house _A = torch.Size((1, 12, 1_024) ) # batch_size, sequence_length, embedding_vector_dim _A = torch.tensor( [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): _A = model(_UpperCAmelCase )['last_hidden_state'].detach() self.assertEqual(output.shape , _UpperCAmelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCAmelCase , atol=1E-3 ) )
315
"""simple docstring""" import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor a = logging.getLogger(__name__) a = 50 # max width of layer names a = 70 # max width of quantizer names def _snake_case ( _snake_case : int ) -> List[Any]: '''simple docstring''' _A = parser.add_argument_group('quant_trainer arguments' ) group.add_argument('--wprec' , type=_snake_case , default=8 , help='weight precision' ) group.add_argument('--aprec' , type=_snake_case , default=8 , help='activation precision' ) group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' ) group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' ) group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' ) group.add_argument('--quant-disable-keyword' , type=_snake_case , nargs='+' , help='disable quantizers by keyword' ) group.add_argument('--quant-disable-layer-module' , type=_snake_case , help='disable quantizers by keyword under layer.' ) group.add_argument('--quant-enable-layer-module' , type=_snake_case , help='enable quantizers by keyword under layer' ) group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' ) group.add_argument('--percentile' , default=_snake_case , type=_snake_case , help='percentile for PercentileCalibrator' ) group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' ) group.add_argument('--clip-gelu' , metavar='N' , type=_snake_case , help='clip gelu output maximum value to N' ) group.add_argument( '--recalibrate-weights' , action='store_true' , help=( 'recalibrate weight amaxes by taking the max of the weights.' ' amaxes will be computed with the current quantization granularity (axis).' ) , ) def _snake_case ( _snake_case : Dict ) -> Optional[Any]: '''simple docstring''' if args.calibrator == "max": _A = 'max' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('Specify --percentile when using percentile calibrator' ) _A = 'histogram' elif args.calibrator == "mse": _A = 'histogram' else: raise ValueError(F'''Invalid calibrator {args.calibrator}''' ) _A = QuantDescriptor(num_bits=args.aprec , calib_method=_snake_case ) _A = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(_snake_case ) quant_nn.QuantLinear.set_default_quant_desc_weight(_snake_case ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Any=False , _snake_case : Union[str, Any]=False ) -> Optional[int]: '''simple docstring''' logger.info('Configuring Model for Quantization' ) logger.info(F'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(_snake_case , ['embeddings'] , which='weight' , _disabled=_snake_case ) if args.quant_disable: set_quantizer_by_name(_snake_case , [''] , _disabled=_snake_case ) if args.quant_disable_keyword: set_quantizer_by_name(_snake_case , args.quant_disable_keyword , _disabled=_snake_case ) if args.quant_disable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' 
+ args.quant_disable_layer_module] , _disabled=_snake_case ) if args.quant_enable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_snake_case ) if args.recalibrate_weights: recalibrate_weights(_snake_case ) if args.fuse_qkv: fuse_qkv(_snake_case , _snake_case ) if args.clip_gelu: clip_gelu(_snake_case , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str ) -> Any: '''simple docstring''' logger.info('Enabling Calibration' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(F'''{name:80}: {module}''' ) def _snake_case ( _snake_case : List[Any] , _snake_case : List[Any] ) -> str: '''simple docstring''' logger.info('Loading calibrated amax' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('percentile' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str , _snake_case : int ) -> str: '''simple docstring''' def fusea(_snake_case : int , _snake_case : str , _snake_case : Optional[Any] ): for mod in [qq, qk, qv]: if not hasattr(_snake_case , '_amax' ): print(' WARNING: NO AMAX BUFFER' ) return _A = qq._amax.detach().item() _A = qk._amax.detach().item() _A = qv._amax.detach().item() _A = max(_snake_case , _snake_case , _snake_case ) qq._amax.fill_(_snake_case ) qk._amax.fill_(_snake_case ) qv._amax.fill_(_snake_case ) logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('.attention.self' ): logger.info(F'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _snake_case ( _snake_case : int , _snake_case : str ) -> Union[str, Any]: '''simple docstring''' for name, mod in model.named_modules(): if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ): _A = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=_snake_case ) _A = mod._input_quantizer._amax.data.detach().item() logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def _snake_case ( _snake_case : List[str] ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None: _A = mod.weight.shape[0] _A = mod._weight_quantizer._amax.detach() _A = torch.ones(_snake_case , dtype=amax.dtype , device=amax.device ) * amax print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def _snake_case ( _snake_case : Dict ) -> Tuple: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ): if not hasattr(mod.weight_quantizer , '_amax' ): print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' ) continue # determine which axes to reduce across # e.g. 
a 4D tensor quantized per axis 0 should reduce over (1,2,3) _A = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _A = set(range(len(mod.weight.size() ) ) ) - axis_set _A = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_snake_case , keepdims=_snake_case ).detach() logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) _A = amax def _snake_case ( _snake_case : Tuple , _snake_case : List[str]=25 , _snake_case : str=1_80 , _snake_case : int=None ) -> List[Any]: '''simple docstring''' if ignore is None: _A = [] elif not isinstance(_snake_case , _snake_case ): _A = [ignore] _A = 0 for name, mod in model.named_modules(): if not hasattr(_snake_case , 'weight' ): continue _A = max(_snake_case , len(_snake_case ) ) for name, mod in model.named_modules(): _A = getattr(_snake_case , '_input_quantizer' , _snake_case ) _A = getattr(_snake_case , '_weight_quantizer' , _snake_case ) if not hasattr(_snake_case , 'weight' ): continue if type(_snake_case ) in ignore: continue if [True for s in ignore if type(_snake_case ) is str and s in name]: continue _A = F'''Act:{input_q.extra_repr()}''' _A = F'''Wgt:{weight_q.extra_repr()}''' _A = F'''{name:{name_width}} {act_str} {wgt_str}''' if len(_snake_case ) <= line_width: logger.info(_snake_case ) else: logger.info(F'''{name:{name_width}} {act_str}''' ) logger.info(F'''{" ":{name_width}} {wgt_str}''' ) def _snake_case ( _snake_case : Dict ) -> int: '''simple docstring''' _A = 0 for name, mod in model.named_modules(): if isinstance(_snake_case , pytorch_quantization.nn.TensorQuantizer ): print(F'''{name:80} {mod}''' ) count += 1 print(F'''{count} TensorQuantizers found in model''' ) def _snake_case ( _snake_case : str , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : Any ) -> int: '''simple docstring''' _A = getattr(_snake_case , _snake_case , _snake_case ) if quantizer_mod is not None: assert hasattr(_snake_case , _snake_case ) setattr(_snake_case , _snake_case , _snake_case ) else: logger.warning(F'''{name} has no {quantizer}''' ) def _snake_case ( _snake_case : Dict , _snake_case : Optional[int] , _snake_case : str="both" , **_snake_case : List[Any] ) -> str: '''simple docstring''' _A = F'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' if which in ["input", "both"]: set_quantizer(_snake_case , _snake_case , '_input_quantizer' , _snake_case , _snake_case ) if which in ["weight", "both"]: set_quantizer(_snake_case , _snake_case , '_weight_quantizer' , _snake_case , _snake_case ) logger.info(_snake_case ) def _snake_case ( _snake_case : Any , _snake_case : int , **_snake_case : Dict ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_input_quantizer' ) or hasattr(_snake_case , '_weight_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): set_quantizers(_snake_case , _snake_case , **_snake_case ) elif name.endswith('_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): _A = F'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' setattr(_snake_case , _snake_case , _snake_case ) logger.info(_snake_case )
315
1
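fuse_qkv above forces the query/key/value quantizers to share one scale by taking the maximum calibrated amax across the three. The rule in isolation, on plain tensors (the tensor names are illustrative; no pytorch_quantization needed):

import torch

q_amax = torch.tensor(1.7)
k_amax = torch.tensor(2.3)
v_amax = torch.tensor(0.9)

# All three adopt the largest amax, so q, k and v quantize with one scale.
fused = max(q_amax.item(), k_amax.item(), v_amax.item())
for amax in (q_amax, k_amax, v_amax):
    amax.fill_(fused)

print(q_amax, k_amax, v_amax)  # all 2.3 -> identical quantization scale for q/k/v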
"""simple docstring""" import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 a = get_tests_dir('''fixtures/dummy-config.json''') class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : List[str] ): _A = 0 def lowerCAmelCase_ ( self : Any ): self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto' ) ) def lowerCAmelCase_ ( self : List[Any] ): _A = AutoConfig.from_pretrained('bert-base-uncased' ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _A = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = AutoConfig.for_model('roberta' ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. _A = os.path.join(_UpperCAmelCase , 'fake-roberta' ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase , 'config.json' ) , 'w' ) as f: f.write(json.dumps({} ) ) _A = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertEqual(type(_UpperCAmelCase ) , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): try: AutoConfig.register('custom' , _UpperCAmelCase ) # Wrong model type will raise an error with self.assertRaises(_UpperCAmelCase ): AutoConfig.register('model' , _UpperCAmelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_UpperCAmelCase ): AutoConfig.register('bert' , _UpperCAmelCase ) # Now that the config is registered, it can be used as any other config with the auto-API _A = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_UpperCAmelCase ) _A = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def lowerCAmelCase_ ( self : Optional[int] ): with self.assertRaisesRegex( _UpperCAmelCase , 'bert-base is not a local folder and is not a valid model identifier' ): _A = AutoConfig.from_pretrained('bert-base' ) def lowerCAmelCase_ ( self : int ): with self.assertRaisesRegex( _UpperCAmelCase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): _A = AutoConfig.from_pretrained(_UpperCAmelCase , revision='aaaaaa' ) def lowerCAmelCase_ ( self : List[str] ): with self.assertRaisesRegex( _UpperCAmelCase , 'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.' 
, ): _A = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo' ) def lowerCAmelCase_ ( self : Union[str, Any] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_UpperCAmelCase ): _A = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_UpperCAmelCase ): _A = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=_UpperCAmelCase ) _A = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=_UpperCAmelCase ) self.assertEqual(config.__class__.__name__ , 'NewModelConfig' ) # Test config can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_UpperCAmelCase ) _A = AutoConfig.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase ) self.assertEqual(reloaded_config.__class__.__name__ , 'NewModelConfig' ) def lowerCAmelCase_ ( self : Tuple ): class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = '''new-model''' try: AutoConfig.register('new-model' , _UpperCAmelCase ) # If remote code is not set, the default is to use local _A = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' ) self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' ) # If remote code is disabled, we load the local one. _A = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=_UpperCAmelCase ) self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' ) # If remote is enabled, we load from the Hub _A = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=_UpperCAmelCase ) self.assertEqual(config.__class__.__name__ , 'NewModelConfig' ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
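
# A minimal sketch of the AutoConfig registration flow the tests above
# exercise; `MyCustomConfig` and the "my-custom" model type are hypothetical
# names used only for illustration.
import tempfile

from transformers import AutoConfig, PretrainedConfig


class MyCustomConfig(PretrainedConfig):
    model_type = "my-custom"  # must not collide with a type Transformers already knows


AutoConfig.register("my-custom", MyCustomConfig)  # registering the same type twice raises

config = MyCustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)  # writes config.json with model_type "my-custom"
    reloaded = AutoConfig.from_pretrained(tmp_dir)
    assert isinstance(reloaded, MyCustomConfig)
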
"""simple docstring""" from scipy.stats import spearmanr import datasets a = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' a = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' a = r'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , ) def lowerCAmelCase_ ( self : Dict , predictions : Optional[Any] , references : int , return_pvalue : Optional[int]=False ): results = spearmanr(predictions , references ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
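
# The metric above delegates to scipy; a minimal sketch of the same
# computation called directly (the numbers match the docstring example).
from scipy.stats import spearmanr

references = [1, 2, 3, 4, 5]
predictions = [10, 9, 2.5, 6, 4]
rho, p_value = spearmanr(predictions, references)
print(round(rho, 2))      # -0.7: predictions trend down as references go up
print(round(p_value, 2))  # ~0.19: the sample is small, so the p-value is large
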
"""simple docstring""" import operator as op a = '''scaler.pt''' a = '''pytorch_model''' a = '''random_states''' a = '''optimizer''' a = '''scheduler''' a = '''pytorch_model.bin''' a = '''pytorch_model.bin.index.json''' a = '''model.safetensors''' a = '''model.safetensors.index.json''' a = '''1.10.2''' a = '''py38''' a = '''4.17.0''' a = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge'''] a = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2'''] a = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP'''] a = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH'''] a = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT'''] a = '''2.0.1''' a = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich'''] a = ['''default''', '''reduce-overhead''', '''max-autotune'''] a = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 a = [ '''nnodes''', '''nproc_per_node''', '''rdzv_backend''', '''rdzv_endpoint''', '''rdzv_id''', '''rdzv_conf''', '''standalone''', '''max_restarts''', '''monitor_interval''', '''start_method''', '''role''', '''module''', '''m''', '''no_python''', '''run_path''', '''log_dir''', '''r''', '''redirects''', '''t''', '''tee''', '''node_rank''', '''master_addr''', '''master_port''', ] a = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM'''] a = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
"""simple docstring""" from collections.abc import Callable def _snake_case ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) -> float: '''simple docstring''' _A = a _A = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('could not find root in given interval.' ) else: _A = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: _A = mid else: _A = mid _A = start + (end - start) / 2.0 return mid def _snake_case ( _snake_case : float ) -> float: '''simple docstring''' return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_000)) import doctest doctest.testmod()
"""simple docstring""" import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : List[str] = RoCBertTokenizer UpperCAmelCase : int = None UpperCAmelCase : Tuple = False UpperCAmelCase : Tuple = True UpperCAmelCase : Tuple = filter_non_english def lowerCAmelCase_ ( self : Union[str, Any] ): super().setUp() _A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd'] _A = {} _A = {} for i, value in enumerate(_UpperCAmelCase ): _A = i _A = i _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] ) _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer: json.dump(_UpperCAmelCase , _UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer: json.dump(_UpperCAmelCase , _UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _A = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) _A = tokenizer.tokenize('你好[SEP]你是谁' ) self.assertListEqual(_UpperCAmelCase , ['你', '好', '[SEP]', '你', '是', '谁'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] ) def lowerCAmelCase_ ( self : Dict ): _A = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase_ ( self : Optional[int] ): _A = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def lowerCAmelCase_ ( self : Tuple ): _A = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? 
' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase_ ( self : int ): _A = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase_ ( self : Dict ): _A = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase_ ( self : str ): _A = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] _A = {} for i, token in enumerate(_UpperCAmelCase ): _A = i _A = RoCBertWordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) def lowerCAmelCase_ ( self : Optional[Any] ): self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' 
) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) def lowerCAmelCase_ ( self : Tuple ): _A = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_UpperCAmelCase ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) if self.test_rust_tokenizer: _A = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(_UpperCAmelCase ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) def lowerCAmelCase_ ( self : Any ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _A = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) _A = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' _A = tokenizer_r.encode_plus( _UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , ) _A = tokenizer_r.do_lower_case if hasattr(_UpperCAmelCase , 'do_lower_case' ) else False _A = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'Allen'), ((21, 23), '##NL'), ((23, 24), '##P'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'allen'), ((21, 23), '##nl'), ((23, 24), '##p'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] ) def lowerCAmelCase_ ( self : int ): _A = ['的', '人', '有'] _A = ''.join(_UpperCAmelCase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _A = True _A = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) _A = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) _A = tokenizer_p.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) _A = tokenizer_r.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) _A = tokenizer_r.convert_ids_to_tokens(_UpperCAmelCase ) _A = tokenizer_p.convert_ids_to_tokens(_UpperCAmelCase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) _A = False _A = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) _A = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) _A = tokenizer_r.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) _A = tokenizer_p.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) _A = tokenizer_r.convert_ids_to_tokens(_UpperCAmelCase ) _A = tokenizer_p.convert_ids_to_tokens(_UpperCAmelCase ) # it is expected that only the first Chinese character is not preceded by "##". 
_A = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_UpperCAmelCase ) ] self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): _A = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) _A = tokenizer.encode('你好' , add_special_tokens=_UpperCAmelCase ) _A = tokenizer.encode('你是谁' , add_special_tokens=_UpperCAmelCase ) _A = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase ) _A = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def lowerCAmelCase_ ( self : Optional[int] ): _A = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _A = '你好,你是谁' _A = tokenizer.tokenize(_UpperCAmelCase ) _A = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) _A = tokenizer.convert_tokens_to_shape_ids(_UpperCAmelCase ) _A = tokenizer.convert_tokens_to_pronunciation_ids(_UpperCAmelCase ) _A = tokenizer.prepare_for_model( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) _A = tokenizer.encode_plus(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
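
# A minimal, simplified sketch of the character predicates the tests above
# exercise. The real helpers live in
# transformers.models.roc_bert.tokenization_roc_bert; these reimplementations
# are assumptions for illustration only.
import unicodedata


def is_whitespace(char: str) -> bool:
    if char in (" ", "\t", "\n", "\r"):
        return True
    return unicodedata.category(char) == "Zs"  # e.g. the no-break space \u00A0


def is_control(char: str) -> bool:
    if char in ("\t", "\n", "\r"):
        return False  # treated as whitespace, not control
    return unicodedata.category(char).startswith("C")


print(is_whitespace("\u00A0"), is_control("\u0005"))  # True True
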
"""simple docstring""" import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(__lowerCAmelCase ) , '''Tatoeba directory does not exist.''' ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : Optional[Any] ): _A = tempfile.mkdtemp() return TatoebaConverter(save_dir=_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Optional[int] ): self.resolver.convert_models(['heb-eng'] ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): _A , _A = self.resolver.write_model_card('opus-mt-he-en' , dry_run=_UpperCAmelCase ) assert mmeta["long_pair"] == "heb-eng"
"""simple docstring""" from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers a = [ '''python''', '''tqdm''', '''regex''', '''requests''', '''packaging''', '''filelock''', '''numpy''', '''tokenizers''', '''huggingface-hub''', '''safetensors''', '''accelerate''', '''pyyaml''', ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def _snake_case ( _snake_case : int , _snake_case : Union[str, Any]=None ) -> Tuple: '''simple docstring''' require_version(deps[pkg] , _snake_case )
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
"""simple docstring""" import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Any = ['''input_values''', '''attention_mask'''] def __init__( self : Dict , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 16_000 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 80 , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : str = "hann_window" , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 7_600 , _UpperCAmelCase : float = 1E-1_0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , **_UpperCAmelCase : List[Any] , ): super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase ) _A = do_normalize _A = return_attention_mask _A = num_mel_bins _A = hop_length _A = win_length _A = win_function _A = frame_signal_scale _A = fmin _A = fmax _A = mel_floor _A = reduction_factor _A = win_length * sampling_rate // 1_000 _A = hop_length * sampling_rate // 1_000 _A = optimal_fft_length(self.sample_size ) _A = (self.n_fft // 2) + 1 _A = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase ) _A = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , ) if frame_signal_scale != 1.0: warnings.warn( 'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) if reduction_factor != 2.0: warnings.warn( 'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def lowerCAmelCase_ ( _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : float = 0.0 ): if attention_mask is not None: _A = np.array(_UpperCAmelCase , np.intaa ) _A = [] for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1 ) ): _A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: _A = padding_value normed_input_values.append(_UpperCAmelCase ) else: _A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , ): _A = spectrogram( _UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , ) return log_mel_spec.T def __call__( self : int , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , 
_UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ): if audio is None and audio_target is None: raise ValueError('You must provide either `audio` or `audio_target` values.' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if audio is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) else: _A = None if audio_target is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) if inputs is None: return inputs_target else: _A = inputs_target['input_values'] _A = inputs_target.get('attention_mask' ) if decoder_attention_mask is not None: _A = decoder_attention_mask return inputs def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ): _A = isinstance(_UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _A = is_batched_numpy or ( isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ): _A = np.asarray(_UpperCAmelCase , dtype=np.floataa ) elif isinstance(_UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): _A = speech.astype(np.floataa ) # always return batch if not is_batched: _A = [speech] # needed to make pad() work on spectrogram inputs _A = self.feature_size # convert into correct format for padding if is_target: _A = [self._extract_mel_features(_UpperCAmelCase ) for waveform in speech] _A = BatchFeature({'input_values': features} ) _A = self.num_mel_bins else: _A = BatchFeature({'input_values': speech} ) _A = self.pad( _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , ) _A = feature_size_hack # convert input values to correct format _A = padded_inputs['input_values'] if not isinstance(input_values[0] , np.ndarray ): _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for array in input_values] elif ( not 
isinstance(_UpperCAmelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): _A = [array.astype(np.floataa ) for array in input_values] elif isinstance(_UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): _A = input_values.astype(np.floataa ) # convert attention_mask to correct format _A = padded_inputs.get('attention_mask' ) if attention_mask is not None: _A = [np.asarray(_UpperCAmelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: _A = ( attention_mask if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) _A = self.zero_mean_unit_var_norm( padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value ) if return_tensors is not None: _A = padded_inputs.convert_to_tensors(_UpperCAmelCase ) return padded_inputs def lowerCAmelCase_ ( self : Any ): _A = super().to_dict() # Don't serialize these as they are derived from the other properties. _A = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs'] for name in names: if name in output: del output[name] return output
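
# A minimal numpy sketch of the masked zero-mean unit-variance normalization
# applied above: statistics come from the unpadded prefix only, and the
# padded tail is reset to `padding_value`.
import numpy as np


def zero_mean_unit_var(vector: np.ndarray, length: int, padding_value: float = 0.0) -> np.ndarray:
    normed = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
    normed[length:] = padding_value  # keep padding at a fixed value
    return normed


x = np.concatenate([np.random.randn(100) * 3 + 5, np.zeros(28)])  # 100 real samples + padding
y = zero_mean_unit_var(x, length=100)
print(round(float(y[:100].mean()), 6), round(float(y[:100].std()), 2))  # ~0.0 ~1.0
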
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : tuple[int, int] , _snake_case : int ) -> list[tuple[int, int]]: '''simple docstring''' _A , _A = position _A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] _A = [] for position in positions: _A , _A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(_snake_case ) return permissible_positions def _snake_case ( _snake_case : list[list[int]] ) -> bool: '''simple docstring''' return not any(elem == 0 for row in board for elem in row ) def _snake_case ( _snake_case : list[list[int]] , _snake_case : tuple[int, int] , _snake_case : int ) -> bool: '''simple docstring''' if is_complete(_snake_case ): return True for position in get_valid_pos(_snake_case , len(_snake_case ) ): _A , _A = position if board[y][x] == 0: _A = curr + 1 if open_knight_tour_helper(_snake_case , _snake_case , curr + 1 ): return True _A = 0 return False def _snake_case ( _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [[0 for i in range(_snake_case )] for j in range(_snake_case )] for i in range(_snake_case ): for j in range(_snake_case ): _A = 1 if open_knight_tour_helper(_snake_case , (i, j) , 1 ): return board _A = 0 _A = F'''Open Kight Tour cannot be performed on a board of size {n}''' raise ValueError(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import math class lowercase_ : '''simple docstring''' def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : list[list[float]] , _UpperCAmelCase : list[int] ): _A = 0.0 _A = 0.0 for i in range(len(_UpperCAmelCase ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def lowerCAmelCase_ ( self : str , _UpperCAmelCase : list[list[int | float]] , _UpperCAmelCase : list[int] , _UpperCAmelCase : int , _UpperCAmelCase : float ): for i in range(len(_UpperCAmelCase ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def _snake_case ( ) -> None: '''simple docstring''' _A = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) _A = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training _A = SelfOrganizingMap() _A = 3 _A = 0.5 for _ in range(_snake_case ): for j in range(len(_snake_case ) ): # training sample _A = training_samples[j] # Compute the winning vector _A = self_organizing_map.get_winner(_snake_case , _snake_case ) # Update the winning vector _A = self_organizing_map.update(_snake_case , _snake_case , _snake_case , _snake_case ) # classify test sample _A = [0, 0, 0, 1] _A = self_organizing_map.get_winner(_snake_case , _snake_case ) # results print(F'''Clusters that the test sample belongs to : {winner}''' ) print(F'''Weights that have been trained : {weights}''' ) # running the main() function if __name__ == "__main__": main()
"""simple docstring""" import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : int , *_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : List[Any] ): super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) _A = eval_examples _A = post_process_function def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str = "eval" ): _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(_UpperCAmelCase ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) else: _A = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_UpperCAmelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCAmelCase ) return metrics def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str = "test" ): _A = self.get_test_dataloader(_UpperCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. 
_A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions , 'predict' ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCAmelCase )
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() a = logging.get_logger(__name__) def _snake_case ( _snake_case : str ) -> Tuple: '''simple docstring''' _A = SwinConfig( embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , ) _A = DetaConfig( backbone_config=_snake_case , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=_snake_case , with_box_refine=_snake_case , two_stage=_snake_case , ) # set labels _A = 'huggingface/label-files' if "o365" in model_name: _A = 3_66 _A = 'object365-id2label.json' else: _A = 91 _A = 'coco-detection-id2label.json' _A = num_labels _A = json.load(open(cached_download(hf_hub_url(_snake_case , _snake_case , repo_type='dataset' ) ) , 'r' ) ) _A = {int(_snake_case ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} return config def _snake_case ( _snake_case : Tuple ) -> str: '''simple docstring''' _A = [] # stem # fmt: off rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') ) rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) 
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.reduction.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.bias''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') ) rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') ) rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') ) rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') ) rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') ) rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', F'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', F'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', F'''model.encoder.layers.{i}.self_attn.value_proj.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', F'''model.encoder.layers.{i}.self_attn.value_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', F'''model.encoder.layers.{i}.self_attn.output_proj.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', F'''model.encoder.layers.{i}.self_attn.output_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.weight''', F'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''model.encoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''model.encoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''model.encoder.layers.{i}.fc2.weight''') ) 
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''model.encoder.layers.{i}.fc2.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''model.encoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''model.encoder.layers.{i}.final_layer_norm.bias''') ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.weight''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''model.decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''model.decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.weight''', F'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.bias''', F'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''model.decoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''model.decoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''model.decoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''model.decoder.layers.{i}.fc2.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''model.decoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''model.decoder.layers.{i}.final_layer_norm.bias''') ) # fmt: on return rename_keys def _snake_case ( _snake_case : Dict , _snake_case : Dict , _snake_case : Union[str, Any] ) -> str: '''simple docstring''' _A = dct.pop(_snake_case ) _A = val def _snake_case ( _snake_case : List[Any] , _snake_case : Any ) -> int: '''simple docstring''' _A = 
[int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _A = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _A = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' ) _A = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _A = in_proj_weight[:dim, :] _A = in_proj_bias[: dim] _A = in_proj_weight[ dim : dim * 2, : ] _A = in_proj_bias[ dim : dim * 2 ] _A = in_proj_weight[ -dim :, : ] _A = in_proj_bias[-dim :] # fmt: on def _snake_case ( _snake_case : int , _snake_case : Tuple ) -> Optional[int]: '''simple docstring''' _A = config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention _A = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) _A = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict _A = in_proj_weight[:hidden_size, :] _A = in_proj_bias[:hidden_size] _A = in_proj_weight[ hidden_size : hidden_size * 2, : ] _A = in_proj_bias[hidden_size : hidden_size * 2] _A = in_proj_weight[-hidden_size:, :] _A = in_proj_bias[-hidden_size:] def _snake_case ( ) -> Union[str, Any]: '''simple docstring''' _A = 'http://images.cocodataset.org/val2017/000000039769.jpg' _A = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ) return im @torch.no_grad() def _snake_case ( _snake_case : str , _snake_case : str , _snake_case : List[Any] ) -> Any: '''simple docstring''' _A = get_deta_config(_snake_case ) # load original state dict if model_name == "deta-swin-large": _A = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' ) elif model_name == "deta-swin-large-o365": _A = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' ) else: raise ValueError(F'''Model name {model_name} not supported''' ) _A = torch.load(_snake_case , map_location='cpu' )['model'] # original state dict for name, param in state_dict.items(): print(_snake_case , param.shape ) # rename keys _A = create_rename_keys(_snake_case ) for src, dest in rename_keys: rename_key(_snake_case , _snake_case , _snake_case ) read_in_swin_q_k_v(_snake_case , config.backbone_config ) read_in_decoder_q_k_v(_snake_case , _snake_case ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: _A = state_dict.pop(_snake_case ) _A = val if "input_proj" in key: _A = state_dict.pop(_snake_case ) _A = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: _A = state_dict.pop(_snake_case ) _A = val # finally, create HuggingFace model and load state dict _A = DetaForObjectDetection(_snake_case ) model.load_state_dict(_snake_case ) model.eval() _A = 'cuda' if torch.cuda.is_available() else 'cpu' model.to(_snake_case ) # load image processor _A = DetaImageProcessor(format='coco_detection' ) # verify our conversion on image _A = prepare_img() _A = processor(images=_snake_case , return_tensors='pt' ) _A = encoding['pixel_values'] _A = model(pixel_values.to(_snake_case ) ) # verify logits print('Logits:' , outputs.logits[0, :3, :3] ) print('Boxes:' , 
outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": _A = torch.tensor( [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] ) _A = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] ) elif model_name == "deta-swin-large-o365": _A = torch.tensor( [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] ) _A = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] ) assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(_snake_case ) , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(_snake_case ) , atol=1E-4 ) print('Everything ok!' ) if pytorch_dump_folder_path: # Save model and processor logger.info(F'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' ) Path(_snake_case ).mkdir(exist_ok=_snake_case ) model.save_pretrained(_snake_case ) processor.save_pretrained(_snake_case ) # Push to hub if push_to_hub: print('Pushing model and processor to hub...' ) model.push_to_hub(F'''jozhang97/{model_name}''' ) processor.push_to_hub(F'''jozhang97/{model_name}''' ) if __name__ == "__main__": a = argparse.ArgumentParser() parser.add_argument( '''--model_name''', type=str, default='''deta-swin-large''', choices=['''deta-swin-large''', '''deta-swin-large-o365'''], help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) a = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
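
# A de-obfuscated sketch of the key-renaming step used throughout the
# conversion above: pop the tensor stored under the original checkpoint key
# and re-insert it under the Hugging Face name.
def rename_checkpoint_key(state_dict: dict, old: str, new: str) -> None:
    state_dict[new] = state_dict.pop(old)


sd = {"backbone.0.body.patch_embed.norm.weight": "tensor"}
rename_checkpoint_key(
    sd,
    "backbone.0.body.patch_embed.norm.weight",
    "model.backbone.model.embeddings.norm.weight",
)
print(sd)  # {'model.backbone.model.embeddings.norm.weight': 'tensor'}
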
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ) -> bool: '''simple docstring''' return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed a = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def _snake_case ( _snake_case : Tuple ) -> Dict: '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def _snake_case ( _snake_case : str , _snake_case : List[Any] ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False elif args.student_type == "gpt2": _A = False def _snake_case ( _snake_case : str , _snake_case : int ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False def _snake_case ( ) -> Tuple: '''simple docstring''' _A = argparse.ArgumentParser(description='Training' ) parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' ) parser.add_argument( '--dump_path' , type=_snake_case , required=_snake_case , help='The output directory (log, checkpoints, parameters, etc.)' ) parser.add_argument( '--data_file' , type=_snake_case , required=_snake_case , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , ) parser.add_argument( '--student_type' , type=_snake_case , choices=['distilbert', 'roberta', 'gpt2'] , required=_snake_case , help='The student type (DistilBERT, RoBERTa).' , ) parser.add_argument('--student_config' , type=_snake_case , required=_snake_case , help='Path to the student configuration.' ) parser.add_argument( '--student_pretrained_weights' , default=_snake_case , type=_snake_case , help='Load student initialization checkpoint.' ) parser.add_argument( '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=_snake_case , help='Teacher type (BERT, RoBERTa).' ) parser.add_argument('--teacher_name' , type=_snake_case , required=_snake_case , help='The teacher model.' ) parser.add_argument('--temperature' , default=2.0 , type=_snake_case , help='Temperature for the softmax temperature.' 
) parser.add_argument( '--alpha_ce' , default=0.5 , type=_snake_case , help='Linear weight for the distillation loss. Must be >=0.' ) parser.add_argument( '--alpha_mlm' , default=0.0 , type=_snake_case , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , ) parser.add_argument('--alpha_clm' , default=0.5 , type=_snake_case , help='Linear weight for the CLM loss. Must be >=0.' ) parser.add_argument('--alpha_mse' , default=0.0 , type=_snake_case , help='Linear weight of the MSE loss. Must be >=0.' ) parser.add_argument( '--alpha_cos' , default=0.0 , type=_snake_case , help='Linear weight of the cosine embedding loss. Must be >=0.' ) parser.add_argument( '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' ) parser.add_argument( '--mlm_mask_prop' , default=0.15 , type=_snake_case , help='Proportion of tokens for which we need to make a prediction.' , ) parser.add_argument('--word_mask' , default=0.8 , type=_snake_case , help='Proportion of tokens to mask out.' ) parser.add_argument('--word_keep' , default=0.1 , type=_snake_case , help='Proportion of tokens to keep.' ) parser.add_argument('--word_rand' , default=0.1 , type=_snake_case , help='Proportion of tokens to randomly replace.' ) parser.add_argument( '--mlm_smoothing' , default=0.7 , type=_snake_case , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , ) parser.add_argument('--token_counts' , type=_snake_case , help='The token counts in the data_file for MLM.' ) parser.add_argument( '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , ) parser.add_argument( '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , ) parser.add_argument( '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , ) parser.add_argument('--n_epoch' , type=_snake_case , default=3 , help='Number of pass on the whole dataset.' ) parser.add_argument('--batch_size' , type=_snake_case , default=5 , help='Batch size (for each process).' ) parser.add_argument( '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , ) parser.add_argument( '--gradient_accumulation_steps' , type=_snake_case , default=50 , help='Gradient accumulation for larger training batches.' , ) parser.add_argument('--warmup_prop' , default=0.05 , type=_snake_case , help='Linear warmup proportion.' ) parser.add_argument('--weight_decay' , default=0.0 , type=_snake_case , help='Weight decay if we apply some.' ) parser.add_argument('--learning_rate' , default=5E-4 , type=_snake_case , help='The initial learning rate for Adam.' ) parser.add_argument('--adam_epsilon' , default=1E-6 , type=_snake_case , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , default=5.0 , type=_snake_case , help='Max gradient norm.' ) parser.add_argument('--initializer_range' , default=0.02 , type=_snake_case , help='Random initialization range.' 
) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=_snake_case , default='O1' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_gpu' , type=_snake_case , default=1 , help='Number of GPUs in the node.' ) parser.add_argument('--local_rank' , type=_snake_case , default=-1 , help='Distributed training - Local rank' ) parser.add_argument('--seed' , type=_snake_case , default=56 , help='Random seed' ) parser.add_argument('--log_interval' , type=_snake_case , default=5_00 , help='Tensorboard logging interval.' ) parser.add_argument('--checkpoint_interval' , type=_snake_case , default=40_00 , help='Checkpoint interval.' ) _A = parser.parse_args() sanity_checks(_snake_case ) # ARGS # init_gpu_params(_snake_case ) set_seed(_snake_case ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' ' itUse `--force` if you want to overwrite it' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f: json.dump(vars(_snake_case ) , _snake_case , indent=4 ) git_log(args.dump_path ) _A , _A , _A = MODEL_CLASSES[args.student_type] _A , _A , _A = MODEL_CLASSES[args.teacher_type] # TOKENIZER # _A = teacher_tokenizer_class.from_pretrained(args.teacher_name ) _A = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): _A = tokenizer.all_special_tokens.index(_snake_case ) _A = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) _A = special_tok_ids _A = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , 'rb' ) as fp: _A = pickle.load(_snake_case ) if args.mlm: logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , 'rb' ) as fp: _A = pickle.load(_snake_case ) _A = np.maximum(_snake_case , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): _A = 0.0 # do not predict special tokens _A = torch.from_numpy(_snake_case ) else: _A = None _A = LmSeqsDataset(params=_snake_case , data=_snake_case ) logger.info('Data loader created.' ) # STUDENT # logger.info(F'''Loading student config from {args.student_config}''' ) _A = student_config_class.from_pretrained(args.student_config ) _A = True if args.student_pretrained_weights is not None: logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' ) _A = student_model_class.from_pretrained(args.student_pretrained_weights , config=_snake_case ) else: _A = student_model_class(_snake_case ) if args.n_gpu > 0: student.to(F'''cuda:{args.local_rank}''' ) logger.info('Student loaded.' 
) # TEACHER # _A = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_snake_case ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_snake_case , _snake_case ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_snake_case , _snake_case ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() _A = Distiller( params=_snake_case , dataset=_snake_case , token_probs=_snake_case , student=_snake_case , teacher=_snake_case ) distiller.train() logger.info('Let\'s go get some drinks.' ) if __name__ == "__main__": main()
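# Hypothetical invocation sketch for the distillation script above; every path
# and model name below is a placeholder, not taken from the original file. Note
# that `sanity_checks` requires `--alpha_clm 0.0` whenever `--mlm` is set.
#
#   python train.py \
#       --student_type distilbert \
#       --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert \
#       --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 0.5 --alpha_mlm 0.5 --alpha_clm 0.0 \
#       --token_counts data/token_counts.pkl \
#       --data_file data/binarized_text.pkl \
#       --dump_path serialization_dir/my_distillation --force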
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) _A = Vector() def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(_UpperCAmelCase ) , '(0,0,0,0,0,1)' ) def lowerCAmelCase_ ( self : Optional[int] ): _A = Vector([1, 2, 3, 4] ) self.assertEqual(len(_UpperCAmelCase ) , 4 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2] ) _A = Vector([1, 2, 3, 4, 5] ) _A = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) _A = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def lowerCAmelCase_ ( self : str ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) _A = Vector([2, -1, 4] ) # for test of dot product _A = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' ) self.assertEqual((a * b) , 0 ) def lowerCAmelCase_ ( self : Dict ): self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 ) def lowerCAmelCase_ ( self : Tuple ): self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , _UpperCAmelCase , _UpperCAmelCase ) ) , '(3,4,7)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 0, 0, 0, 0, 0] ) _A = x.copy() self.assertEqual(str(_UpperCAmelCase ) , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(_UpperCAmelCase ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : str ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) _A = Vector([1, 2, 3] ) self.assertEqual('(14,32,50)' , str(a * x ) ) self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) 
a.change_component(0 , 2 , 5 ) self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : List[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) ) def lowerCAmelCase_ ( self : int ): self.assertEqual( '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
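# Minimal usage sketch of the `lib` module that the tests above exercise
# (illustrative; it only assumes the same Vector/Matrix API the assertions use):
#
#   from lib import Matrix, Vector
#   v = Vector([1, 2, 3])
#   m = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
#   print(m * v)                    # matrix-vector product -> (14,32,50)
#   print(Vector([2, -1, 4]) * v)   # dot product -> 12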
"""simple docstring""" from collections.abc import Sequence def _snake_case ( _snake_case : Sequence[float] , _snake_case : float ) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_snake_case ) ) def _snake_case ( _snake_case : Sequence[float] , _snake_case : float ) -> float: '''simple docstring''' _A = 0.0 for coeff in reversed(_snake_case ): _A = result * x + coeff return result if __name__ == "__main__": a = (0.0, 0.0, 5.0, 9.3, 7.0) a = 1_0.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''', } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : int = '''xlnet''' UpperCAmelCase : List[Any] = ['''mems'''] UpperCAmelCase : Any = { '''n_token''': '''vocab_size''', # Backward compatibility '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , _UpperCAmelCase : Dict=32_000 , _UpperCAmelCase : List[str]=1_024 , _UpperCAmelCase : Any=24 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : Union[str, Any]=4_096 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Any=True , _UpperCAmelCase : str="bi" , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1E-1_2 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=512 , _UpperCAmelCase : Dict=None , _UpperCAmelCase : int=True , _UpperCAmelCase : int=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : int=-1 , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Union[str, Any]="last" , _UpperCAmelCase : int=True , _UpperCAmelCase : str="tanh" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Optional[Any]=5 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Dict=2 , **_UpperCAmelCase : int , ): _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( 'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`' ' instead.' , _UpperCAmelCase , ) _A = kwargs['use_cache'] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) @property def lowerCAmelCase_ ( self : Tuple ): logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] ): # Message copied from Transformer-XL documentation raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
"""simple docstring""" class lowercase_ : '''simple docstring''' def __init__( self : Optional[Any] , _UpperCAmelCase : int ): _A = n _A = [None] * self.n _A = 0 # index of the first element _A = 0 _A = 0 def __len__( self : Union[str, Any] ): return self.size def lowerCAmelCase_ ( self : int ): return self.size == 0 def lowerCAmelCase_ ( self : int ): return False if self.is_empty() else self.array[self.front] def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Tuple ): if self.size >= self.n: raise Exception('QUEUE IS FULL' ) _A = data _A = (self.rear + 1) % self.n self.size += 1 return self def lowerCAmelCase_ ( self : Any ): if self.size == 0: raise Exception('UNDERFLOW' ) _A = self.array[self.front] _A = None _A = (self.front + 1) % self.n self.size -= 1 return temp
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed a = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def _snake_case ( _snake_case : Tuple ) -> Dict: '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def _snake_case ( _snake_case : str , _snake_case : List[Any] ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False elif args.student_type == "gpt2": _A = False def _snake_case ( _snake_case : str , _snake_case : int ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False def _snake_case ( ) -> Tuple: '''simple docstring''' _A = argparse.ArgumentParser(description='Training' ) parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' ) parser.add_argument( '--dump_path' , type=_snake_case , required=_snake_case , help='The output directory (log, checkpoints, parameters, etc.)' ) parser.add_argument( '--data_file' , type=_snake_case , required=_snake_case , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , ) parser.add_argument( '--student_type' , type=_snake_case , choices=['distilbert', 'roberta', 'gpt2'] , required=_snake_case , help='The student type (DistilBERT, RoBERTa).' , ) parser.add_argument('--student_config' , type=_snake_case , required=_snake_case , help='Path to the student configuration.' ) parser.add_argument( '--student_pretrained_weights' , default=_snake_case , type=_snake_case , help='Load student initialization checkpoint.' ) parser.add_argument( '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=_snake_case , help='Teacher type (BERT, RoBERTa).' ) parser.add_argument('--teacher_name' , type=_snake_case , required=_snake_case , help='The teacher model.' ) parser.add_argument('--temperature' , default=2.0 , type=_snake_case , help='Temperature for the softmax temperature.' 
) parser.add_argument( '--alpha_ce' , default=0.5 , type=_snake_case , help='Linear weight for the distillation loss. Must be >=0.' ) parser.add_argument( '--alpha_mlm' , default=0.0 , type=_snake_case , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , ) parser.add_argument('--alpha_clm' , default=0.5 , type=_snake_case , help='Linear weight for the CLM loss. Must be >=0.' ) parser.add_argument('--alpha_mse' , default=0.0 , type=_snake_case , help='Linear weight of the MSE loss. Must be >=0.' ) parser.add_argument( '--alpha_cos' , default=0.0 , type=_snake_case , help='Linear weight of the cosine embedding loss. Must be >=0.' ) parser.add_argument( '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' ) parser.add_argument( '--mlm_mask_prop' , default=0.15 , type=_snake_case , help='Proportion of tokens for which we need to make a prediction.' , ) parser.add_argument('--word_mask' , default=0.8 , type=_snake_case , help='Proportion of tokens to mask out.' ) parser.add_argument('--word_keep' , default=0.1 , type=_snake_case , help='Proportion of tokens to keep.' ) parser.add_argument('--word_rand' , default=0.1 , type=_snake_case , help='Proportion of tokens to randomly replace.' ) parser.add_argument( '--mlm_smoothing' , default=0.7 , type=_snake_case , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , ) parser.add_argument('--token_counts' , type=_snake_case , help='The token counts in the data_file for MLM.' ) parser.add_argument( '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , ) parser.add_argument( '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , ) parser.add_argument( '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , ) parser.add_argument('--n_epoch' , type=_snake_case , default=3 , help='Number of pass on the whole dataset.' ) parser.add_argument('--batch_size' , type=_snake_case , default=5 , help='Batch size (for each process).' ) parser.add_argument( '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , ) parser.add_argument( '--gradient_accumulation_steps' , type=_snake_case , default=50 , help='Gradient accumulation for larger training batches.' , ) parser.add_argument('--warmup_prop' , default=0.05 , type=_snake_case , help='Linear warmup proportion.' ) parser.add_argument('--weight_decay' , default=0.0 , type=_snake_case , help='Weight decay if we apply some.' ) parser.add_argument('--learning_rate' , default=5E-4 , type=_snake_case , help='The initial learning rate for Adam.' ) parser.add_argument('--adam_epsilon' , default=1E-6 , type=_snake_case , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , default=5.0 , type=_snake_case , help='Max gradient norm.' ) parser.add_argument('--initializer_range' , default=0.02 , type=_snake_case , help='Random initialization range.' 
) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=_snake_case , default='O1' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_gpu' , type=_snake_case , default=1 , help='Number of GPUs in the node.' ) parser.add_argument('--local_rank' , type=_snake_case , default=-1 , help='Distributed training - Local rank' ) parser.add_argument('--seed' , type=_snake_case , default=56 , help='Random seed' ) parser.add_argument('--log_interval' , type=_snake_case , default=5_00 , help='Tensorboard logging interval.' ) parser.add_argument('--checkpoint_interval' , type=_snake_case , default=40_00 , help='Checkpoint interval.' ) _A = parser.parse_args() sanity_checks(_snake_case ) # ARGS # init_gpu_params(_snake_case ) set_seed(_snake_case ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' ' itUse `--force` if you want to overwrite it' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f: json.dump(vars(_snake_case ) , _snake_case , indent=4 ) git_log(args.dump_path ) _A , _A , _A = MODEL_CLASSES[args.student_type] _A , _A , _A = MODEL_CLASSES[args.teacher_type] # TOKENIZER # _A = teacher_tokenizer_class.from_pretrained(args.teacher_name ) _A = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): _A = tokenizer.all_special_tokens.index(_snake_case ) _A = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) _A = special_tok_ids _A = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , 'rb' ) as fp: _A = pickle.load(_snake_case ) if args.mlm: logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , 'rb' ) as fp: _A = pickle.load(_snake_case ) _A = np.maximum(_snake_case , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): _A = 0.0 # do not predict special tokens _A = torch.from_numpy(_snake_case ) else: _A = None _A = LmSeqsDataset(params=_snake_case , data=_snake_case ) logger.info('Data loader created.' ) # STUDENT # logger.info(F'''Loading student config from {args.student_config}''' ) _A = student_config_class.from_pretrained(args.student_config ) _A = True if args.student_pretrained_weights is not None: logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' ) _A = student_model_class.from_pretrained(args.student_pretrained_weights , config=_snake_case ) else: _A = student_model_class(_snake_case ) if args.n_gpu > 0: student.to(F'''cuda:{args.local_rank}''' ) logger.info('Student loaded.' 
) # TEACHER # _A = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_snake_case ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_snake_case , _snake_case ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_snake_case , _snake_case ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() _A = Distiller( params=_snake_case , dataset=_snake_case , token_probs=_snake_case , student=_snake_case , teacher=_snake_case ) distiller.train() logger.info('Let\'s go get some drinks.' ) if __name__ == "__main__": main()
315
1
"""simple docstring""" import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration a = [ # tf -> hf ('''/''', '''.'''), ('''layer_''', '''layers.'''), ('''kernel''', '''weight'''), ('''beta''', '''bias'''), ('''gamma''', '''weight'''), ('''pegasus''', '''model'''), ] a = [ ('''.output.dense''', '''.fc2'''), ('''intermediate.LayerNorm''', '''final_layer_norm'''), ('''intermediate.dense''', '''fc1'''), ] a = ( INIT_COMMON + [ ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''), ('''attention.output.dense''', '''self_attn.out_proj'''), ('''attention.self''', '''self_attn'''), ('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''), ('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''), ('''attention.encdec''', '''encoder_attn'''), ('''key''', '''k_proj'''), ('''value''', '''v_proj'''), ('''query''', '''q_proj'''), ('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''), ] + END_COMMON ) a = ( INIT_COMMON + [ ('''embeddings.word_embeddings''', '''shared.weight'''), ('''embeddings.position_embeddings''', '''embed_positions.weight'''), ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''), ('''attention.output.dense''', '''self_attn.output'''), ('''attention.self''', '''self_attn.self'''), ('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''), ] + END_COMMON ) a = [ '''encdec/key/bias''', '''encdec/query/bias''', '''encdec/value/bias''', '''self/key/bias''', '''self/query/bias''', '''self/value/bias''', '''encdec_output/dense/bias''', '''attention/output/dense/bias''', ] def _snake_case ( _snake_case : Dict , _snake_case : Dict ) -> str: '''simple docstring''' for tf_name, hf_name in patterns: _A = k.replace(_snake_case , _snake_case ) return k def _snake_case ( _snake_case : dict , _snake_case : dict ) -> BigBirdPegasusForConditionalGeneration: '''simple docstring''' _A = BigBirdPegasusConfig(**_snake_case ) _A = BigBirdPegasusForConditionalGeneration(_snake_case ) _A = torch_model.state_dict() _A = {} # separating decoder weights _A = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )} _A = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )} for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ): _A = [k.endswith(_snake_case ) for ending in KEYS_TO_IGNORE] if any(_snake_case ): continue _A = DECODER_PATTERNS _A = rename_state_dict_key(_snake_case , _snake_case ) if new_k not in state_dict: raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' ) if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ): _A = v.T _A = torch.from_numpy(_snake_case ) assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ): _A = [k.endswith(_snake_case ) for ending in KEYS_TO_IGNORE] if any(_snake_case ): continue _A = REMAINING_PATTERNS _A = rename_state_dict_key(_snake_case , _snake_case ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F'''could not find new key {new_k} in state dict. 
(converted from {k})''' ) if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ): _A = v.T _A = torch.from_numpy(_snake_case ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' _A = mapping['model.embed_positions.weight'] _A = mapping.pop('model.embed_positions.weight' ) _A , _A = torch_model.load_state_dict(_snake_case , strict=_snake_case ) _A = [ k for k in missing if k not in [ 'final_logits_bias', 'model.encoder.embed_tokens.weight', 'model.decoder.embed_tokens.weight', 'lm_head.weight', ] ] assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], F'''no matches found for the following tf keys {extra}''' return torch_model def _snake_case ( _snake_case : Any ) -> Dict: '''simple docstring''' _A = tf.train.list_variables(_snake_case ) _A = {} _A = ['global_step'] for name, shape in tqdm(_snake_case , desc='converting tf checkpoint to dict' ): _A = any(pat in name for pat in ignore_name ) if skip_key: continue _A = tf.train.load_variable(_snake_case , _snake_case ) _A = array return tf_weights def _snake_case ( _snake_case : str , _snake_case : str , _snake_case : dict ) -> Optional[Any]: '''simple docstring''' _A = get_tf_weights_as_numpy(_snake_case ) _A = convert_bigbird_pegasus(_snake_case , _snake_case ) torch_model.save_pretrained(_snake_case ) if __name__ == "__main__": a = argparse.ArgumentParser() parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''') parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''') a = parser.parse_args() a = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
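# Hypothetical invocation sketch for the conversion script above (paths are
# placeholders, not from the original file):
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird_pegasus_tf_checkpoint \
#       --save_dir ./bigbird-pegasus-converted
#
# `config_update` is left empty here; fill it in to override
# BigBirdPegasusConfig defaults before the weights are converted.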
"""simple docstring""" from manim import * class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Dict ): _A = Rectangle(height=0.5 , width=0.5 ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _A = Rectangle(height=0.25 , width=0.25 ) _A = [mem.copy() for i in range(6 )] _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('CPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(4 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('GPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Model' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(_UpperCAmelCase ) _A = [] _A = [] for i, rect in enumerate(_UpperCAmelCase ): _A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 ) target.move_to(_UpperCAmelCase ) model_arr.append(_UpperCAmelCase ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(_UpperCAmelCase ) self.add(*_UpperCAmelCase , *_UpperCAmelCase ) _A = [meta_mem.copy() for i in range(6 )] _A = [meta_mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Disk' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) disk.move_to([-4, -1.25, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _A = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(_UpperCAmelCase ) _A = MarkupText( F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase ) ) _A = Square(0.3 ) input.set_fill(_UpperCAmelCase , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 ) self.play(Write(_UpperCAmelCase ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 ) self.play(MoveToTarget(_UpperCAmelCase ) ) self.play(FadeOut(_UpperCAmelCase ) ) _A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 ) a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 ) model_cpu_arr[0].generate_target() 
model_cpu_arr[0].target.move_to(gpu_rect[0] ) _A = MarkupText( F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) ) _A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) _A = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) _A = AnimationGroup( FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 ) self.play(_UpperCAmelCase ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: _A = 0.7 self.play( Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) _A = a_c _A = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , ) _A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) ) self.wait()
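# Rendering sketch (illustrative): assuming this scene is saved as stage.py and
# the class is given a proper name such as `ModelOffloadScene`, it could be
# rendered with manim's CLI:
#
#   manim -pql stage.py ModelOffloadScene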
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available a = {'''tokenization_herbert''': ['''HerbertTokenizer''']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['''HerbertTokenizerFast'''] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" def _snake_case ( _snake_case : list , _snake_case : int = 0 ) -> list: '''simple docstring''' _A = length or len(_snake_case ) _A = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: _A , _A = list_data[i + 1], list_data[i] _A = True return list_data if not swapped else bubble_sort(_snake_case , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" a = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' a = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] a = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
"""simple docstring""" import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Any = ['''input_values''', '''attention_mask'''] def __init__( self : Dict , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 16_000 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 80 , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : str = "hann_window" , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 7_600 , _UpperCAmelCase : float = 1E-1_0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , **_UpperCAmelCase : List[Any] , ): super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase ) _A = do_normalize _A = return_attention_mask _A = num_mel_bins _A = hop_length _A = win_length _A = win_function _A = frame_signal_scale _A = fmin _A = fmax _A = mel_floor _A = reduction_factor _A = win_length * sampling_rate // 1_000 _A = hop_length * sampling_rate // 1_000 _A = optimal_fft_length(self.sample_size ) _A = (self.n_fft // 2) + 1 _A = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase ) _A = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , ) if frame_signal_scale != 1.0: warnings.warn( 'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) if reduction_factor != 2.0: warnings.warn( 'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def lowerCAmelCase_ ( _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : float = 0.0 ): if attention_mask is not None: _A = np.array(_UpperCAmelCase , np.intaa ) _A = [] for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1 ) ): _A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: _A = padding_value normed_input_values.append(_UpperCAmelCase ) else: _A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , ): _A = spectrogram( _UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , ) return log_mel_spec.T def __call__( self : int , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , 
_UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ): if audio is None and audio_target is None: raise ValueError('You must provide either `audio` or `audio_target` values.' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if audio is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) else: _A = None if audio_target is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) if inputs is None: return inputs_target else: _A = inputs_target['input_values'] _A = inputs_target.get('attention_mask' ) if decoder_attention_mask is not None: _A = decoder_attention_mask return inputs def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ): _A = isinstance(_UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _A = is_batched_numpy or ( isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ): _A = np.asarray(_UpperCAmelCase , dtype=np.floataa ) elif isinstance(_UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): _A = speech.astype(np.floataa ) # always return batch if not is_batched: _A = [speech] # needed to make pad() work on spectrogram inputs _A = self.feature_size # convert into correct format for padding if is_target: _A = [self._extract_mel_features(_UpperCAmelCase ) for waveform in speech] _A = BatchFeature({'input_values': features} ) _A = self.num_mel_bins else: _A = BatchFeature({'input_values': speech} ) _A = self.pad( _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , ) _A = feature_size_hack # convert input values to correct format _A = padded_inputs['input_values'] if not isinstance(input_values[0] , np.ndarray ): _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for array in input_values] elif ( not 
isinstance(_UpperCAmelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): _A = [array.astype(np.floataa ) for array in input_values] elif isinstance(_UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): _A = input_values.astype(np.floataa ) # convert attention_mask to correct format _A = padded_inputs.get('attention_mask' ) if attention_mask is not None: _A = [np.asarray(_UpperCAmelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: _A = ( attention_mask if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) _A = self.zero_mean_unit_var_norm( padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value ) if return_tensors is not None: _A = padded_inputs.convert_to_tensors(_UpperCAmelCase ) return padded_inputs def lowerCAmelCase_ ( self : Any ): _A = super().to_dict() # Don't serialize these as they are derived from the other properties. _A = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs'] for name in names: if name in output: del output[name] return output
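# Hypothetical usage sketch for the feature extractor above. The class is named
# `lowercase_` in this dump; upstream it corresponds to SpeechT5FeatureExtractor.
#
#   import numpy as np
#   extractor = SpeechT5FeatureExtractor()
#   waveform = np.zeros(16_000, dtype=np.float32)                     # 1 s at 16 kHz
#   inputs = extractor(audio=waveform, sampling_rate=16_000)          # raw values
#   targets = extractor(audio_target=waveform, sampling_rate=16_000)  # log-mel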
"""simple docstring""" from ... import PretrainedConfig a = { '''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''', } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Dict = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP UpperCAmelCase : str = '''nezha''' def __init__( self : str , _UpperCAmelCase : List[Any]=21_128 , _UpperCAmelCase : Tuple=768 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : List[Any]=3_072 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Tuple=512 , _UpperCAmelCase : int=64 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Union[str, Any]=1E-1_2 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Tuple=3 , _UpperCAmelCase : Tuple=True , **_UpperCAmelCase : List[Any] , ): super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = max_relative_position _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = classifier_dropout _A = use_cache
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : int , _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [] create_all_state(1 , _snake_case , _snake_case , [] , _snake_case ) return result def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : list[int] , _snake_case : list[list[int]] , ) -> None: '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(_snake_case , total_number - level + 2 ): current_list.append(_snake_case ) create_all_state(i + 1 , _snake_case , level - 1 , _snake_case , _snake_case ) current_list.pop() def _snake_case ( _snake_case : list[list[int]] ) -> None: '''simple docstring''' for i in total_list: print(*_snake_case ) if __name__ == "__main__": a = 4 a = 2 a = generate_all_combinations(n, k) print_all_state(total_list)
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy a = logging.getLogger(__name__) def _snake_case ( _snake_case : torch.nn.Module , _snake_case : BnbQuantizationConfig , _snake_case : Union[str, os.PathLike] = None , _snake_case : Optional[Dict[str, Union[int, str, torch.device]]] = None , _snake_case : Optional[List[str]] = None , _snake_case : Optional[Dict[Union[int, str], Union[int, str]]] = None , _snake_case : Optional[Union[str, os.PathLike]] = None , _snake_case : bool = False , ) -> int: '''simple docstring''' _A = bnb_quantization_config.load_in_abit _A = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( 'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,' ' make sure you have the latest version of `bitsandbytes` installed.' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( 'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,' 'make sure you have the latest version of `bitsandbytes` installed.' ) _A = [] # custom device map if isinstance(_snake_case , _snake_case ) and len(device_map.keys() ) > 1: _A = [key for key, value in device_map.items() if value in ['disk', 'cpu']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: _A = get_keys_to_not_convert(_snake_case ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_snake_case ) _A = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: _A = [] _A = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_snake_case ) # compatibility with peft _A = load_in_abit _A = load_in_abit _A = get_parameter_device(_snake_case ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( 'It is not recommended to quantize a loaded model. ' 'The model should be instantiated under the `init_empty_weights` context manager.' 
) _A = replace_with_bnb_layers(_snake_case , _snake_case , modules_to_not_convert=_snake_case ) # convert param to the right dtype _A = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: _A = name.replace('.weight' , '' ).replace('.bias' , '' ) _A = getattr(_snake_case , _snake_case , _snake_case ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_snake_case ): param.to(_snake_case ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info( F'''The model device type is {model_device.type}. However, cuda is needed for quantization.''' 'We move the model to cuda.' ) return model elif weights_location is None: raise RuntimeError( F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' ) else: with init_empty_weights(): _A = replace_with_bnb_layers( _snake_case , _snake_case , modules_to_not_convert=_snake_case ) _A = get_quantized_model_device_map( _snake_case , _snake_case , _snake_case , max_memory=_snake_case , no_split_module_classes=_snake_case , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): _A = True _A = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] ) load_checkpoint_in_model( _snake_case , _snake_case , _snake_case , dtype=bnb_quantization_config.torch_dtype , offload_folder=_snake_case , offload_state_dict=_snake_case , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_snake_case , device_map=_snake_case , offload_dir=_snake_case ) def _snake_case ( _snake_case : List[str] , _snake_case : List[str] , _snake_case : Any=None , _snake_case : Dict=None , _snake_case : int=None ) -> Any: '''simple docstring''' if device_map is None: if torch.cuda.is_available(): _A = {'': torch.cuda.current_device()} else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' ) if isinstance(_snake_case , _snake_case ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( 'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ' '\'sequential\'.' ) _A = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) _A = {} _A = special_dtypes _A = no_split_module_classes _A = bnb_quantization_config.target_dtype # get max_memory for each device. 
if device_map != "sequential": _A = get_balanced_memory( _snake_case , low_zero=(device_map == 'balanced_low_0') , max_memory=_snake_case , **_snake_case , ) _A = max_memory _A = infer_auto_device_map(_snake_case , **_snake_case ) if isinstance(_snake_case , _snake_case ): # check if don't have any quantized module on the cpu _A = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules _A = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' ) else: logger.info( 'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' ) del device_map_without_some_modules return device_map def _snake_case ( _snake_case : Optional[Any] , _snake_case : Any , _snake_case : Union[str, Any]=None , _snake_case : Tuple=None ) -> Tuple: '''simple docstring''' if modules_to_not_convert is None: _A = [] _A , _A = _replace_with_bnb_layers( _snake_case , _snake_case , _snake_case , _snake_case ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def _snake_case ( _snake_case : int , _snake_case : str , _snake_case : str=None , _snake_case : Optional[int]=None , ) -> str: '''simple docstring''' _A = False for name, module in model.named_children(): if current_key_name is None: _A = [] current_key_name.append(_snake_case ) if isinstance(_snake_case , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` _A = '.'.join(_snake_case ) _A = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." 
in current_key_name_str) ) or key == current_key_name_str: _A = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: _A = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_snake_case , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: _A = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' ) _A = module.weight.data if module.bias is not None: _A = module.bias.data bnb_module.requires_grad_(_snake_case ) setattr(_snake_case , _snake_case , _snake_case ) _A = True if len(list(module.children() ) ) > 0: _A , _A = _replace_with_bnb_layers( _snake_case , _snake_case , _snake_case , _snake_case ) _A = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _snake_case ( _snake_case : Any ) -> Optional[int]: '''simple docstring''' with init_empty_weights(): _A = deepcopy(_snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager` _A = find_tied_parameters(_snake_case ) # For compatibility with Accelerate < 0.18 if isinstance(_snake_case , _snake_case ): _A = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: _A = sum(_snake_case , [] ) _A = len(_snake_case ) > 0 # Check if it is a base model _A = False if hasattr(_snake_case , 'base_model_prefix' ): _A = not hasattr(_snake_case , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head _A = list(model.named_children() ) _A = [list_modules[-1][0]] # add last module together with tied weights _A = set(_snake_case ) - set(_snake_case ) _A = list(set(_snake_case ) ) + list(_snake_case ) # remove ".weight" from the keys _A = ['.weight', '.bias'] _A = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: _A = name.replace(_snake_case , '' ) filtered_module_names.append(_snake_case ) return filtered_module_names def _snake_case ( _snake_case : Tuple ) -> Tuple: '''simple docstring''' for m in model.modules(): if isinstance(_snake_case , bnb.nn.Linearabit ): return True return False def _snake_case ( _snake_case : nn.Module ) -> Optional[Any]: '''simple docstring''' return next(parameter.parameters() ).device def _snake_case ( _snake_case : Any , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Any , _snake_case : str ) -> Any: '''simple docstring''' if fpaa_statistics is None: set_module_tensor_to_device(_snake_case , _snake_case , 0 , dtype=_snake_case , value=_snake_case ) _A = param_name _A = model if "." in tensor_name: _A = tensor_name.split('.' 
) for split in splits[:-1]: _A = getattr(_snake_case , _snake_case ) if new_module is None: raise ValueError(F'''{module} has no attribute {split}.''' ) _A = new_module _A = splits[-1] # offload weights _A = False offload_weight(module._parameters[tensor_name] , _snake_case , _snake_case , index=_snake_case ) if hasattr(module._parameters[tensor_name] , 'SCB' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , _snake_case , index=_snake_case , ) else: offload_weight(_snake_case , _snake_case , _snake_case , index=_snake_case ) offload_weight(_snake_case , param_name.replace('weight' , 'SCB' ) , _snake_case , index=_snake_case ) set_module_tensor_to_device(_snake_case , _snake_case , 'meta' , dtype=_snake_case , value=torch.empty(*param.size() ) )
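# --- Illustrative sketch (not part of the original file) ---
# The recursive module-swapping traversal used by `_replace_with_bnb_layers`
# above, shown with a plain replacement module so it runs without bitsandbytes.
# `TrackedLinear` and `replace_linear_layers` are hypothetical names introduced
# only for this demo; this is a minimal sketch of the pattern, not the real API.
import torch.nn as nn


class TrackedLinear(nn.Linear):
    """Stand-in for a quantized linear layer."""


def replace_linear_layers(model: nn.Module, modules_to_not_convert=()) -> nn.Module:
    """Recursively swap every nn.Linear child, copying its weights over."""
    for name, module in model.named_children():
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            new_module = TrackedLinear(
                module.in_features, module.out_features, bias=module.bias is not None
            )
            new_module.weight.data = module.weight.data
            if module.bias is not None:
                new_module.bias.data = module.bias.data
            setattr(model, name, new_module)
        elif len(list(module.children())) > 0:
            replace_linear_layers(module, modules_to_not_convert)
    return model


if __name__ == "__main__":
    net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Sequential(nn.Linear(8, 2)))
    replace_linear_layers(net)
    assert isinstance(net[0], TrackedLinear)
    assert isinstance(net[2][0], TrackedLinear)  # nested modules are handled too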
"""simple docstring""" def _snake_case ( _snake_case : int = 10_00 ) -> int: '''simple docstring''' return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class lowercase_ : '''simple docstring''' def __init__( self : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any=13 , _UpperCAmelCase : str=7 , _UpperCAmelCase : Dict=False , _UpperCAmelCase : int=True , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : List[Any]=19 , _UpperCAmelCase : Any=32 , _UpperCAmelCase : Optional[Any]=5 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : str=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Optional[int]=512 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Any=0.02 , _UpperCAmelCase : str=3 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : Optional[int]=None , ): _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = scope def lowerCAmelCase_ ( self : int ): _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Union[str, Any] ): _A = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=_UpperCAmelCase , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , ) return config def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple ): _A = EsmForProteinFolding(config=_UpperCAmelCase ).float() model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase ) _A = model(_UpperCAmelCase ) _A = model(_UpperCAmelCase ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, 
self.seq_length, 7, 2) ) def lowerCAmelCase_ ( self : int ): _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : str = False UpperCAmelCase : Optional[int] = (EsmForProteinFolding,) if is_torch_available() else () UpperCAmelCase : List[Any] = () UpperCAmelCase : Optional[int] = {} if is_torch_available() else {} UpperCAmelCase : List[Any] = False def lowerCAmelCase_ ( self : Optional[Any] ): _A = EsmFoldModelTester(self ) _A = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Union[str, Any] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Tuple ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) @unittest.skip('Does not support attention outputs' ) def lowerCAmelCase_ ( self : Tuple ): pass @unittest.skip def lowerCAmelCase_ ( self : List[Any] ): pass @unittest.skip('Esm does not support embedding resizing' ) def lowerCAmelCase_ ( self : Any ): pass @unittest.skip('Esm does not support embedding resizing' ) def lowerCAmelCase_ ( self : List[Any] ): pass @unittest.skip('ESMFold does not support passing input embeds!' ) def lowerCAmelCase_ ( self : List[str] ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowerCAmelCase_ ( self : Dict ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowerCAmelCase_ ( self : Optional[Any] ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowerCAmelCase_ ( self : List[Any] ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowerCAmelCase_ ( self : int ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowerCAmelCase_ ( self : Optional[int] ): pass @unittest.skip('ESMFold does not output hidden states in the normal way.' ) def lowerCAmelCase_ ( self : List[Any] ): pass @unittest.skip('ESMfold does not output hidden states in the normal way.' ) def lowerCAmelCase_ ( self : Optional[Any] ): pass @unittest.skip('ESMFold only has one output format.' ) def lowerCAmelCase_ ( self : Any ): pass @unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' ) def lowerCAmelCase_ ( self : Tuple ): pass @unittest.skip('ESMFold does not support input chunking.' ) def lowerCAmelCase_ ( self : Tuple ): pass @unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' ) def lowerCAmelCase_ ( self : List[str] ): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowerCAmelCase_ ( self : Tuple ): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowerCAmelCase_ ( self : Tuple ): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowerCAmelCase_ ( self : List[str] ): pass @unittest.skip('ESMFold doesn\'t support data parallel.' ) def lowerCAmelCase_ ( self : Tuple ): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def lowerCAmelCase_ ( self : str ): pass @require_torch class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self : Tuple ): _A = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float() model.eval() _A = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) _A = model(_UpperCAmelCase )['positions'] _A = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , _UpperCAmelCase , atol=1E-4 ) )
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[str] ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple=True ): _A = () for resnet, attn in zip(self.resnets , self.attentions ): _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[Any] ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str]=True ): _A = () for resnet in self.resnets: _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels 
+ res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int]=True ): for resnet in self.resnets: # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Dict ): # there is always at least one resnet _A = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] _A = [] for _ in range(self.num_layers ): _A = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) 
resnets.append(_UpperCAmelCase ) _A = resnets _A = attentions def __call__( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=True ): _A = self.resnets[0](_UpperCAmelCase , _UpperCAmelCase ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) return hidden_states
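# --- Illustrative sketch (not part of the original file) ---
# The skip-connection bookkeeping used by the up blocks above, in isolation:
# pop the matching down-block activation off the residual tuple and concatenate
# it along the channel axis. `pop_and_concat` is a name introduced for this demo.
import jax.numpy as jnp


def pop_and_concat(hidden_states, res_hidden_states_tuple):
    res_hidden_states = res_hidden_states_tuple[-1]
    res_hidden_states_tuple = res_hidden_states_tuple[:-1]
    merged = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
    return merged, res_hidden_states_tuple


if __name__ == "__main__":
    h = jnp.ones((1, 8, 8, 4))
    skips = (jnp.ones((1, 8, 8, 2)), jnp.ones((1, 8, 8, 3)))
    h, skips = pop_and_concat(h, skips)
    print(h.shape)  # (1, 8, 8, 7): 4 + 3 channels after the concat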
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ) -> bool: '''simple docstring''' return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import numpy class lowercase_ : '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : numpy.ndarray ): _A = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. _A = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. _A = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. _A = numpy.random.rand(3 , 1 ) # Real output values provided. _A = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. _A = numpy.zeros(output_array.shape ) def lowerCAmelCase_ ( self : List[str] ): _A = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def lowerCAmelCase_ ( self : Optional[int] ): _A = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) _A = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) _A = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : bool ): for iteration in range(1 , iterations + 1 ): _A = self.feedforward() self.back_propagation() if give_loss: _A = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F'''Iteration {iteration} Loss: {loss}''' ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : numpy.ndarray ): _A = 
input_arr _A = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return 1 / (1 + numpy.exp(-value )) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return (value) * (1 - (value)) def _snake_case ( ) -> int: '''simple docstring''' _A = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. _A = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. _A = TwoHiddenLayerNeuralNetwork( input_array=_snake_case , output_array=_snake_case ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=_snake_case , iterations=10 , give_loss=_snake_case ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
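# --- Illustrative sketch (not part of the original file) ---
# A finite-difference check that sigmoid_derivative(sigmoid(x)) matches the
# true derivative of sigmoid at x; back_propagation() relies on exactly this,
# since it evaluates the derivative on already-activated values.
# `_check_sigmoid_derivative` is a name introduced just for this demo.
def _check_sigmoid_derivative() -> float:
    xs = numpy.linspace(-3.0, 3.0, 13)
    eps = 1e-6
    numeric = (sigmoid(xs + eps) - sigmoid(xs - eps)) / (2 * eps)
    analytic = sigmoid_derivative(sigmoid(xs))
    return float(numpy.max(numpy.abs(numeric - analytic)))


if __name__ == "__main__":
    print(_check_sigmoid_derivative())  # tiny (~1e-11) if the two agree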
"""simple docstring""" from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class lowercase_ : '''simple docstring''' def __init__( self : Tuple , _UpperCAmelCase : str , ): _A = parent _A = 13 _A = 7 _A = True _A = True _A = True _A = 99 _A = 32 _A = 2 _A = 4 _A = 37 _A = 'gelu' _A = 0.1 _A = 0.1 _A = 512 _A = 16 _A = 2 _A = 0.02 _A = 3 _A = 4 _A = None def lowerCAmelCase_ ( self : Any ): _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : List[Any] ): ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = self.prepare_config_and_inputs() _A = True _A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple ): _A = TFEsmModel(config=_UpperCAmelCase ) _A = {'input_ids': input_ids, 'attention_mask': input_mask} _A = model(_UpperCAmelCase ) _A = [input_ids, input_mask] _A = model(_UpperCAmelCase ) _A = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , ): _A = True _A = TFEsmModel(config=_UpperCAmelCase ) _A = { 'input_ids': input_ids, 'attention_mask': input_mask, 'encoder_hidden_states': encoder_hidden_states, 'encoder_attention_mask': encoder_attention_mask, } _A = model(_UpperCAmelCase ) _A = [input_ids, input_mask] _A = model(_UpperCAmelCase , 
encoder_hidden_states=_UpperCAmelCase ) # Also check the case where encoder outputs are not passed _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : str ): _A = TFEsmForMaskedLM(config=_UpperCAmelCase ) _A = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ): _A = self.num_labels _A = TFEsmForTokenClassification(config=_UpperCAmelCase ) _A = {'input_ids': input_ids, 'attention_mask': input_mask} _A = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Optional[int] = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) UpperCAmelCase : int = ( { '''feature-extraction''': TFEsmModel, '''fill-mask''': TFEsmForMaskedLM, '''text-classification''': TFEsmForSequenceClassification, '''token-classification''': TFEsmForTokenClassification, '''zero-shot''': TFEsmForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase : int = False UpperCAmelCase : Tuple = False def lowerCAmelCase_ ( self : List[str] ): _A = TFEsmModelTester(self ) _A = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Tuple ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Optional[int] ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : Any ): _A = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : str ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = TFEsmModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @unittest.skip('Protein models do not support embedding resizing.' ) def lowerCAmelCase_ ( self : Union[str, Any] ): pass @unittest.skip('Protein models do not support embedding resizing.' 
) def lowerCAmelCase_ ( self : Optional[int] ): pass def lowerCAmelCase_ ( self : str ): _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(_UpperCAmelCase ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer _A = model.get_bias() assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) for k, v in name.items(): assert isinstance(_UpperCAmelCase , tf.Variable ) else: _A = model.get_output_embeddings() assert x is None _A = model.get_bias() assert name is None @require_tf class lowercase_ ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self : Tuple ): _A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) _A = tf.constant([[0, 1, 2, 3, 4, 5]] ) _A = model(_UpperCAmelCase )[0] _A = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , _UpperCAmelCase ) # compare the actual values for a slice. _A = tf.constant( [ [ [8.92_1518, -10.58_9814, -6.467_1307], [-6.396_7156, -13.91_1377, -1.121_1915], [-7.78_1247, -13.95_1557, -3.74_0592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def lowerCAmelCase_ ( self : Dict ): _A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) _A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) _A = model(_UpperCAmelCase )[0] # compare the actual values for a slice. _A = tf.constant( [ [ [0.1444_3092, 0.5412_5327, 0.324_7739], [0.3034_0484, 0.0052_6676, 0.3107_7722], [0.3227_8043, -0.2498_7096, 0.341_4628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar a = TypeVar('''T''') class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : T ): _A = data _A = None def __str__( self : str ): return F'''{self.data}''' class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Tuple ): _A = None def __iter__( self : List[Any] ): _A = self.top while node: yield node.data _A = node.next def __str__( self : Union[str, Any] ): return "->".join([str(_UpperCAmelCase ) for item in self] ) def __len__( self : List[Any] ): return len(tuple(iter(self ) ) ) def lowerCAmelCase_ ( self : str ): return self.top is None def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : T ): _A = Node(_UpperCAmelCase ) if not self.is_empty(): _A = self.top _A = node def lowerCAmelCase_ ( self : Dict ): if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , _UpperCAmelCase ) _A = self.top _A = self.top.next return pop_node.data def lowerCAmelCase_ ( self : Tuple ): if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def lowerCAmelCase_ ( self : Optional[Any] ): _A = None if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" def _snake_case ( _snake_case : int = 50 ) -> int: '''simple docstring''' _A = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Any , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ): warnings.warn( 'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ImageGPTImageProcessor instead.' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer a = logging.get_logger(__name__) a = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} a = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } a = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } a = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } a = { '''facebook/dpr-ctx_encoder-single-nq-base''': 512, '''facebook/dpr-ctx_encoder-multiset-base''': 512, } a = { '''facebook/dpr-question_encoder-single-nq-base''': 512, '''facebook/dpr-question_encoder-multiset-base''': 512, } a = { '''facebook/dpr-reader-single-nq-base''': 512, '''facebook/dpr-reader-multiset-base''': 512, } a = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } a = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } a = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES UpperCAmelCase : List[Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : Dict = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : Any = 
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase : Union[str, Any] = DPRContextEncoderTokenizer class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : str = VOCAB_FILES_NAMES UpperCAmelCase : List[str] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : str = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : str = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase : List[Any] = DPRQuestionEncoderTokenizer a = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) a = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) a = r''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(__lowerCAmelCase ) class lowercase_ : '''simple docstring''' def __call__( self : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Union[bool, str] = False , _UpperCAmelCase : Union[bool, str] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[bool] = None , **_UpperCAmelCase : int , ): if titles is None and texts is None: return super().__call__( _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , ) elif titles is None or texts is None: _A = titles if texts is None else texts return super().__call__( _UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , ) _A = titles if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [titles] _A = texts if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [texts] _A = len(_UpperCAmelCase ) _A = questions if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [questions] * n_passages assert len(_UpperCAmelCase ) == len( _UpperCAmelCase ), F'''There should be as many titles than texts but got {len(_UpperCAmelCase )} titles and {len(_UpperCAmelCase )} texts.''' _A = super().__call__(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )['input_ids'] _A = super().__call__(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )['input_ids'] _A = { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(_UpperCAmelCase , _UpperCAmelCase ) ] } if return_attention_mask is not False: _A = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id 
!= self.pad_token_id ) for input_id in input_ids] ) _A = attention_mask return self.pad(_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : BatchEncoding , _UpperCAmelCase : DPRReaderOutput , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : int = 4 , ): _A = reader_input['input_ids'] _A , _A , _A = reader_output[:3] _A = len(_UpperCAmelCase ) _A = sorted(range(_UpperCAmelCase ) , reverse=_UpperCAmelCase , key=relevance_logits.__getitem__ ) _A = [] for doc_id in sorted_docs: _A = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence _A = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: _A = sequence_ids.index(self.pad_token_id ) else: _A = len(_UpperCAmelCase ) _A = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_UpperCAmelCase , top_spans=_UpperCAmelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_UpperCAmelCase , start_index=_UpperCAmelCase , end_index=_UpperCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(_UpperCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def lowerCAmelCase_ ( self : int , _UpperCAmelCase : List[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : int , _UpperCAmelCase : int , ): _A = [] for start_index, start_score in enumerate(_UpperCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) _A = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase ) _A = [] for (start_index, end_index), score in scores: assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]''' _A = end_index - start_index + 1 assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}''' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(_UpperCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(__lowerCAmelCase ) class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES UpperCAmelCase : str = READER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : Optional[int] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : Tuple = READER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase : Tuple = ['''input_ids''', '''attention_mask'''] UpperCAmelCase : Optional[Any] = DPRReaderTokenizer
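# --- Illustrative sketch (not part of the original file) ---
# A standalone version of the span-selection logic in `_get_best_spans` above:
# score every (start, end) pair up to a maximum answer length, sort by score,
# and keep the top spans that do not nest inside an already-chosen span.
# `best_spans` is a name introduced for this demo; it runs without transformers.
def best_spans(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scores = []
    for start_index, start_score in enumerate(start_logits):
        for answer_length, end_score in enumerate(
            end_logits[start_index : start_index + max_answer_length]
        ):
            scores.append(
                ((start_index, start_index + answer_length), start_score + end_score)
            )
    scores.sort(key=lambda x: x[1], reverse=True)
    chosen = []
    for (start_index, end_index), _score in scores:
        # Skip spans that contain, or are contained in, a chosen span.
        if any(
            s <= start_index <= end_index <= e or start_index <= s <= e <= end_index
            for s, e in chosen
        ):
            continue
        chosen.append((start_index, end_index))
        if len(chosen) == top_spans:
            break
    return chosen


if __name__ == "__main__":
    print(best_spans([0.1, 0.9, 0.2, 0.8], [0.2, 0.1, 0.9, 0.7]))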
"""simple docstring""" from __future__ import annotations import collections import pprint from pathlib import Path def _snake_case ( _snake_case : str ) -> str: '''simple docstring''' return "".join(sorted(_snake_case ) ) def _snake_case ( _snake_case : str ) -> list[str]: '''simple docstring''' return word_by_signature[signature(_snake_case )] a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''') a = sorted({word.strip().lower() for word in data.splitlines()}) a = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open('''anagrams.txt''', '''w''') as file: file.write('''all_anagrams = \n ''') file.write(pprint.pformat(all_anagrams))
"""simple docstring""" import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class lowercase_ ( enum.Enum ): '''simple docstring''' UpperCAmelCase : int = 0 UpperCAmelCase : Any = 1 UpperCAmelCase : Tuple = 2 @add_end_docstrings(__lowerCAmelCase ) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : str = ''' In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> ''' def __init__( self : str , *_UpperCAmelCase : int , **_UpperCAmelCase : List[str] ): super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _A = None if self.model.config.prefix is not None: _A = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _A = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_A , _A , _A = self._sanitize_parameters(prefix=_UpperCAmelCase , **self._forward_params ) _A = {**self._preprocess_params, **preprocess_params} _A = {**self._forward_params, **forward_params} def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : int=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[int]=None , **_UpperCAmelCase : List[Any] , ): _A = {} if prefix is not None: _A = prefix if prefix: _A = self.tokenizer( _UpperCAmelCase , padding=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=self.framework ) _A = prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected''' ' [None, \'hole\']' ) _A = handle_long_generation preprocess_params.update(_UpperCAmelCase ) _A = generate_kwargs _A = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) _A = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) _A = ReturnType.TENSORS if return_type is not None: _A = return_type if clean_up_tokenization_spaces is not None: _A = clean_up_tokenization_spaces if stop_sequence is not None: _A = self.tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) if len(_UpperCAmelCase ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) _A = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def lowerCAmelCase_ ( self : Optional[int] , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Any ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*_UpperCAmelCase , **_UpperCAmelCase ) def __call__( self : List[Any] , _UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[str] ): return super().__call__(_UpperCAmelCase , **_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : int="" , _UpperCAmelCase : Any=None , **_UpperCAmelCase : Optional[int] ): _A = self.tokenizer( prefix + prompt_text , padding=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=self.framework ) _A = prompt_text if handle_long_generation == "hole": _A = inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: _A = generate_kwargs['max_new_tokens'] else: _A = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _A = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) _A = inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: _A = inputs['attention_mask'][:, -keep_length:] return inputs def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Dict ): _A = model_inputs['input_ids'] _A = model_inputs.get('attention_mask' , _UpperCAmelCase ) # Allow empty prompts if input_ids.shape[1] == 0: _A = None _A = None _A = 1 else: _A = input_ids.shape[0] _A = model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_A = generate_kwargs.pop('prefix_length' , 0 ) if prefix_length > 0: _A = 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: _A = generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _A = 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _A = self.model.generate(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , **_UpperCAmelCase ) _A = generated_sequence.shape[0] if self.framework == "pt": _A = generated_sequence.reshape(_UpperCAmelCase , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _A = tf.reshape(_UpperCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any]=ReturnType.FULL_TEXT , _UpperCAmelCase : str=True ): _A = model_outputs['generated_sequence'][0] _A = model_outputs['input_ids'] _A = model_outputs['prompt_text'] _A = generated_sequence.numpy().tolist() _A = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _A = {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _A = self.tokenizer.decode( _UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _A = 0 else: _A = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase , ) ) if return_type == ReturnType.FULL_TEXT: _A = prompt_text + text[prompt_length:] else: _A = text[prompt_length:] _A = {'generated_text': all_text} records.append(_UpperCAmelCase ) return records
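# A minimal usage sketch of the text-generation pipeline parameterized above
# ("gpt2" is an illustrative checkpoint, not one named in this file):
from transformers import pipeline

generator = pipeline('text-generation', model='gpt2')
outputs = generator(
    'Once upon a time',
    max_new_tokens=20,
    return_full_text=False,         # maps to ReturnType.NEW_TEXT in postprocess
    handle_long_generation='hole',  # preprocess keeps only the most recent tokens that still fit
)
print(outputs[0]['generated_text'])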
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version a = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : Optional[str] = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The column name of the images in the files.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the training data.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the validation data.'''} ) UpperCAmelCase : Optional[float] = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase_ ( self : Dict ): _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : str = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. 
Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) UpperCAmelCase : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCAmelCase : str = field(default=__lowerCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) UpperCAmelCase : float = field( default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : float = field( default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} ) def _snake_case ( _snake_case : int ) -> Optional[int]: '''simple docstring''' _A = torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ) -> List[str]: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , _snake_case , _snake_case ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(_snake_case ) transformers.utils.logging.set_verbosity(_snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. 
''' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0: _A = ds['train'].train_test_split(data_args.train_val_split ) _A = split['train'] _A = split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: _A = ViTMAEConfig.from_pretrained(model_args.config_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _A = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTImageProcessor() # create model if model_args.model_name_or_path: _A = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) _A = ViTMAEForPreTraining(_snake_case ) if training_args.do_train: _A = ds['train'].column_names else: _A = ds['validation'].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = 'image' elif "img" in column_names: _A = 'img' else: _A = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _A = image_processor.size['shortest_edge'] else: _A = (image_processor.size['height'], image_processor.size['width']) _A = Compose( [ Lambda(lambda _snake_case : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(_snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(_snake_case : List[Any] ): _A = 
[transforms(_snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: _A = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: _A = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_snake_case ) # Compute absolute learning rate _A = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _A = training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer _A = Trainer( model=_snake_case , args=_snake_case , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=_snake_case ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics('eval' , _snake_case ) trainer.save_metrics('eval' , _snake_case ) # Write model card and (optionally) push to hub _A = { 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**_snake_case ) else: trainer.create_model_card(**_snake_case ) def _snake_case ( _snake_case : List[str] ) -> Optional[Any]: '''simple docstring''' main() if __name__ == "__main__": main()
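# Worked example of the absolute-learning-rate rule applied above, which scales
# the base rate linearly with the effective batch size (normalized to 256):
base_learning_rate = 1e-3
train_batch_size = 32
gradient_accumulation_steps = 4
world_size = 2
total_train_batch_size = train_batch_size * gradient_accumulation_steps * world_size  # 256
learning_rate = base_learning_rate * total_train_batch_size / 256  # 1e-3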
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ) -> int: '''simple docstring''' while b: _A , _A = b, a % b return a def _snake_case ( _snake_case : int , _snake_case : int ) -> int: '''simple docstring''' return a if b == 0 else euclidean_gcd_recursive(_snake_case , a % b ) def _snake_case ( ) -> Dict: '''simple docstring''' print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' ) print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' ) print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' ) print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' ) print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' ) print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' ) print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' ) print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' ) print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' ) print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' ) if __name__ == "__main__": main()
"""simple docstring""" import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor a = logging.getLogger(__name__) a = 50 # max width of layer names a = 70 # max width of quantizer names def _snake_case ( _snake_case : int ) -> List[Any]: '''simple docstring''' _A = parser.add_argument_group('quant_trainer arguments' ) group.add_argument('--wprec' , type=_snake_case , default=8 , help='weight precision' ) group.add_argument('--aprec' , type=_snake_case , default=8 , help='activation precision' ) group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' ) group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' ) group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' ) group.add_argument('--quant-disable-keyword' , type=_snake_case , nargs='+' , help='disable quantizers by keyword' ) group.add_argument('--quant-disable-layer-module' , type=_snake_case , help='disable quantizers by keyword under layer.' ) group.add_argument('--quant-enable-layer-module' , type=_snake_case , help='enable quantizers by keyword under layer' ) group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' ) group.add_argument('--percentile' , default=_snake_case , type=_snake_case , help='percentile for PercentileCalibrator' ) group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' ) group.add_argument('--clip-gelu' , metavar='N' , type=_snake_case , help='clip gelu output maximum value to N' ) group.add_argument( '--recalibrate-weights' , action='store_true' , help=( 'recalibrate weight amaxes by taking the max of the weights.' ' amaxes will be computed with the current quantization granularity (axis).' ) , ) def _snake_case ( _snake_case : Dict ) -> Optional[Any]: '''simple docstring''' if args.calibrator == "max": _A = 'max' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('Specify --percentile when using percentile calibrator' ) _A = 'histogram' elif args.calibrator == "mse": _A = 'histogram' else: raise ValueError(F'''Invalid calibrator {args.calibrator}''' ) _A = QuantDescriptor(num_bits=args.aprec , calib_method=_snake_case ) _A = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(_snake_case ) quant_nn.QuantLinear.set_default_quant_desc_weight(_snake_case ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Any=False , _snake_case : Union[str, Any]=False ) -> Optional[int]: '''simple docstring''' logger.info('Configuring Model for Quantization' ) logger.info(F'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(_snake_case , ['embeddings'] , which='weight' , _disabled=_snake_case ) if args.quant_disable: set_quantizer_by_name(_snake_case , [''] , _disabled=_snake_case ) if args.quant_disable_keyword: set_quantizer_by_name(_snake_case , args.quant_disable_keyword , _disabled=_snake_case ) if args.quant_disable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' 
+ args.quant_disable_layer_module] , _disabled=_snake_case ) if args.quant_enable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_snake_case ) if args.recalibrate_weights: recalibrate_weights(_snake_case ) if args.fuse_qkv: fuse_qkv(_snake_case , _snake_case ) if args.clip_gelu: clip_gelu(_snake_case , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str ) -> Any: '''simple docstring''' logger.info('Enabling Calibration' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(F'''{name:80}: {module}''' ) def _snake_case ( _snake_case : List[Any] , _snake_case : List[Any] ) -> str: '''simple docstring''' logger.info('Loading calibrated amax' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('percentile' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str , _snake_case : int ) -> str: '''simple docstring''' def fusea(_snake_case : int , _snake_case : str , _snake_case : Optional[Any] ): for mod in [qq, qk, qv]: if not hasattr(_snake_case , '_amax' ): print(' WARNING: NO AMAX BUFFER' ) return _A = qq._amax.detach().item() _A = qk._amax.detach().item() _A = qv._amax.detach().item() _A = max(_snake_case , _snake_case , _snake_case ) qq._amax.fill_(_snake_case ) qk._amax.fill_(_snake_case ) qv._amax.fill_(_snake_case ) logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('.attention.self' ): logger.info(F'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _snake_case ( _snake_case : int , _snake_case : str ) -> Union[str, Any]: '''simple docstring''' for name, mod in model.named_modules(): if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ): _A = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=_snake_case ) _A = mod._input_quantizer._amax.data.detach().item() logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def _snake_case ( _snake_case : List[str] ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None: _A = mod.weight.shape[0] _A = mod._weight_quantizer._amax.detach() _A = torch.ones(_snake_case , dtype=amax.dtype , device=amax.device ) * amax print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def _snake_case ( _snake_case : Dict ) -> Tuple: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ): if not hasattr(mod.weight_quantizer , '_amax' ): print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' ) continue # determine which axes to reduce across # e.g. 
a 4D tensor quantized per axis 0 should reduce over (1,2,3) _A = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _A = set(range(len(mod.weight.size() ) ) ) - axis_set _A = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_snake_case , keepdims=_snake_case ).detach() logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) _A = amax def _snake_case ( _snake_case : Tuple , _snake_case : List[str]=25 , _snake_case : str=1_80 , _snake_case : int=None ) -> List[Any]: '''simple docstring''' if ignore is None: _A = [] elif not isinstance(_snake_case , _snake_case ): _A = [ignore] _A = 0 for name, mod in model.named_modules(): if not hasattr(_snake_case , 'weight' ): continue _A = max(_snake_case , len(_snake_case ) ) for name, mod in model.named_modules(): _A = getattr(_snake_case , '_input_quantizer' , _snake_case ) _A = getattr(_snake_case , '_weight_quantizer' , _snake_case ) if not hasattr(_snake_case , 'weight' ): continue if type(_snake_case ) in ignore: continue if [True for s in ignore if type(_snake_case ) is str and s in name]: continue _A = F'''Act:{input_q.extra_repr()}''' _A = F'''Wgt:{weight_q.extra_repr()}''' _A = F'''{name:{name_width}} {act_str} {wgt_str}''' if len(_snake_case ) <= line_width: logger.info(_snake_case ) else: logger.info(F'''{name:{name_width}} {act_str}''' ) logger.info(F'''{" ":{name_width}} {wgt_str}''' ) def _snake_case ( _snake_case : Dict ) -> int: '''simple docstring''' _A = 0 for name, mod in model.named_modules(): if isinstance(_snake_case , pytorch_quantization.nn.TensorQuantizer ): print(F'''{name:80} {mod}''' ) count += 1 print(F'''{count} TensorQuantizers found in model''' ) def _snake_case ( _snake_case : str , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : Any ) -> int: '''simple docstring''' _A = getattr(_snake_case , _snake_case , _snake_case ) if quantizer_mod is not None: assert hasattr(_snake_case , _snake_case ) setattr(_snake_case , _snake_case , _snake_case ) else: logger.warning(F'''{name} has no {quantizer}''' ) def _snake_case ( _snake_case : Dict , _snake_case : Optional[int] , _snake_case : str="both" , **_snake_case : List[Any] ) -> str: '''simple docstring''' _A = F'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' if which in ["input", "both"]: set_quantizer(_snake_case , _snake_case , '_input_quantizer' , _snake_case , _snake_case ) if which in ["weight", "both"]: set_quantizer(_snake_case , _snake_case , '_weight_quantizer' , _snake_case , _snake_case ) logger.info(_snake_case ) def _snake_case ( _snake_case : Any , _snake_case : int , **_snake_case : Dict ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_input_quantizer' ) or hasattr(_snake_case , '_weight_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): set_quantizers(_snake_case , _snake_case , **_snake_case ) elif name.endswith('_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): _A = F'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' setattr(_snake_case , _snake_case , _snake_case ) logger.info(_snake_case )
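# A minimal sketch of the per-tensor vs. per-channel choice configured above:
# axis=None keeps one scale for the whole weight tensor, axis=(0,) keeps one
# amax per output channel (mirrors the QuantDescriptor calls in this file).
import pytorch_quantization.nn as quant_nn
from pytorch_quantization.tensor_quant import QuantDescriptor

input_desc = QuantDescriptor(num_bits=8, calib_method='histogram')  # activations
weight_desc = QuantDescriptor(num_bits=8, axis=(0,))                # per-channel weights
quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
layer = quant_nn.QuantLinear(16, 16)  # new layers pick up these defaults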
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , ) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue_model_parallelism.py''', '''model_name_or_path''': '''roberta-large''', '''instance_type''': '''ml.p3dn.24xlarge''', '''results''': {'''train_runtime''': 1600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2}, }, { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''roberta-large''', '''instance_type''': '''ml.p3dn.24xlarge''', '''results''': {'''train_runtime''': 1600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2}, }, ] ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : List[Any] ): if self.framework == "pytorch": subprocess.run( F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='utf-8' , check=_UpperCAmelCase , ) assert hasattr(self , 'env' ) def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Dict ): # configuration for running training on smdistributed Model Parallel _A = { 'enabled': True, 'processes_per_host': 8, } _A = { 'enabled': True, 'parameters': { 'microbatches': 4, 'placement_strategy': 'spread', 'pipeline': 'interleaved', 'optimize': 'speed', 'partitions': 4, 'ddp': True, }, } _A = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options} _A = 'trainer' if self.script == 'run_glue.py' else 'smtrainer' # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=_UpperCAmelCase , instance_type=self.instance_type , debugger_hook_config=_UpperCAmelCase , hyperparameters={ **self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path, 'max_steps': 500, } , metric_definitions=self.env.metric_definitions , distribution=_UpperCAmelCase , py_version='py36' , ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : List[str] ): TrainingJobAnalytics(_UpperCAmelCase ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(1,)] ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Union[str, Any] ): # create estimator _A = self.create_estimator(_UpperCAmelCase ) # run training estimator.fit() # result dataframe _A = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis _A = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] ) _A = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping _A = ( Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy ) assert all(t <= 
self.results['eval_loss'] for t in eval_loss ) # dump test results into a json file to share in the PR with open(F'''{estimator.latest_training_job.name}.json''' , 'w' ) as outfile: json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , _UpperCAmelCase )
"""simple docstring""" from scipy.stats import spearmanr import datasets a = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' a = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' a = r'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=False ): _A = spearmanr(_UpperCAmelCase , _UpperCAmelCase ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
"""simple docstring""" import socket def _snake_case ( ) -> str: '''simple docstring''' _A = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) _A = socket.gethostname() _A = 1_23_12 sock.connect((host, port) ) sock.send(B'Hello server!' ) with open('Received_file' , 'wb' ) as out_file: print('File opened' ) print('Receiving data...' ) while True: _A = sock.recv(10_24 ) if not data: break out_file.write(_snake_case ) print('Successfully received the file' ) sock.close() print('Connection closed' ) if __name__ == "__main__": main()
"""simple docstring""" from collections.abc import Callable def _snake_case ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) -> float: '''simple docstring''' _A = a _A = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('could not find root in given interval.' ) else: _A = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: _A = mid else: _A = mid _A = start + (end - start) / 2.0 return mid def _snake_case ( _snake_case : float ) -> float: '''simple docstring''' return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_000)) import doctest doctest.testmod()
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : List[str] = '''Salesforce/blip-image-captioning-base''' UpperCAmelCase : Tuple = ( '''This is a tool that generates a description of an image. It takes an input named `image` which should be the ''' '''image to caption, and returns a text that contains the description in English.''' ) UpperCAmelCase : List[str] = '''image_captioner''' UpperCAmelCase : Optional[int] = AutoModelForVisionaSeq UpperCAmelCase : str = ['''image'''] UpperCAmelCase : str = ['''text'''] def __init__( self : str , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Optional[Any] ): requires_backends(self , ['vision'] ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : "Image" ): return self.pre_processor(images=_UpperCAmelCase , return_tensors='pt' ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Any ): return self.model.generate(**_UpperCAmelCase ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Tuple ): return self.pre_processor.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )[0].strip()
"""simple docstring""" import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(__lowerCAmelCase ) , '''Tatoeba directory does not exist.''' ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : Optional[Any] ): _A = tempfile.mkdtemp() return TatoebaConverter(save_dir=_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Optional[int] ): self.resolver.convert_models(['heb-eng'] ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): _A , _A = self.resolver.write_model_card('opus-mt-he-en' , dry_run=_UpperCAmelCase ) assert mmeta["long_pair"] == "heb-eng"
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : str , _snake_case : str ) -> bool: '''simple docstring''' _A = get_failure_array(_snake_case ) # 2) Step through text searching for pattern _A , _A = 0, 0 # index into text, pattern while i < len(_snake_case ): if pattern[j] == text[i]: if j == (len(_snake_case ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: _A = failure[j - 1] continue i += 1 return False def _snake_case ( _snake_case : str ) -> list[int]: '''simple docstring''' _A = [0] _A = 0 _A = 1 while j < len(_snake_case ): if pattern[i] == pattern[j]: i += 1 elif i > 0: _A = failure[i - 1] continue j += 1 failure.append(_snake_case ) return failure if __name__ == "__main__": # Test 1) a = '''abc1abc12''' a = '''alskfjaldsabc1abc1abc12k23adsfabcabc''' a = '''alskfjaldsk23adsfabcabc''' assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) a = '''ABABX''' a = '''ABABZABABYABABX''' assert kmp(pattern, text) # Test 3) a = '''AAAB''' a = '''ABAAAAAB''' assert kmp(pattern, text) # Test 4) a = '''abcdabcy''' a = '''abcxabcdabxabcdabcdabcy''' assert kmp(pattern, text) # Test 5) a = '''aabaabaaa''' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
"""simple docstring""" import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Tuple ): _A = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_UpperCAmelCase , 'tf_padding' ) ) self.parent.assertTrue(hasattr(_UpperCAmelCase , 'depth_multiplier' ) ) class lowercase_ : '''simple docstring''' def __init__( self : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any]=13 , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : Tuple=32 , _UpperCAmelCase : Any=0.25 , _UpperCAmelCase : Optional[Any]=8 , _UpperCAmelCase : int=8 , _UpperCAmelCase : Any=6 , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : Any="relu6" , _UpperCAmelCase : Tuple=1_280 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Optional[Any]=10 , _UpperCAmelCase : List[Any]=None , ): _A = parent _A = batch_size _A = num_channels _A = image_size _A = depth_multiplier _A = depth_divisible_by _A = min_depth _A = expand_ratio _A = tf_padding _A = output_stride _A = first_layer_is_expansion _A = finegrained_output _A = hidden_act _A = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) _A = classifier_dropout_prob _A = use_labels _A = is_training _A = num_labels _A = initializer_range _A = scope def lowerCAmelCase_ ( self : str ): _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.num_labels ) _A = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _A = self.get_config() return config, pixel_values, labels, pixel_labels def lowerCAmelCase_ ( self : List[str] ): return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ): _A = MobileNetVaModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase ) self.parent.assertEqual( 
result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) self.parent.assertEqual( result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , ) def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ): _A = self.num_labels _A = MobileNetVaForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ): _A = self.num_labels _A = MobileNetVaForSemanticSegmentation(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) _A = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = self.prepare_config_and_inputs() _A , _A , _A , _A = config_and_inputs _A = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Any = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) UpperCAmelCase : List[Any] = ( { '''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification, '''image-segmentation''': MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) UpperCAmelCase : Optional[int] = False UpperCAmelCase : Dict = False UpperCAmelCase : Optional[int] = False UpperCAmelCase : str = False def lowerCAmelCase_ ( self : List[Any] ): _A = MobileNetVaModelTester(self ) _A = MobileNetVaConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason='MobileNetV2 does not use inputs_embeds' ) def lowerCAmelCase_ ( self : int ): pass @unittest.skip(reason='MobileNetV2 does not support input and output embeddings' ) def lowerCAmelCase_ ( self : List[Any] ): pass @unittest.skip(reason='MobileNetV2 does not output attentions' ) def lowerCAmelCase_ ( self : Union[str, Any] ): pass def lowerCAmelCase_ ( self : List[str] ): _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(_UpperCAmelCase ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): def check_hidden_states_output(_UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] ): _A = 
model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): _A = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) _A = outputs.hidden_states _A = 16 self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = MobileNetVaModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def _snake_case ( ) -> Dict: '''simple docstring''' _A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : Optional[int] ): return ( MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None ) @slow def lowerCAmelCase_ ( self : Tuple ): _A = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(_UpperCAmelCase ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): _A = model(**_UpperCAmelCase ) # verify the logits _A = torch.Size((1, 1_001) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) _A = torch.tensor([0.2445, -1.1993, 0.1905] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCAmelCase_ ( self : int ): _A = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' ) _A = model.to(_UpperCAmelCase ) _A = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' ) _A = prepare_img() _A = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): _A = model(**_UpperCAmelCase ) _A = outputs.logits # verify the logits _A = torch.Size((1, 21, 65, 65) ) self.assertEqual(logits.shape , _UpperCAmelCase ) _A = torch.tensor( [ [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]], [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]], [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]], ] , device=_UpperCAmelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : tuple[int, int] , _snake_case : int ) -> list[tuple[int, int]]: '''simple docstring''' _A , _A = position _A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] _A = [] for position in positions: _A , _A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(_snake_case ) return permissible_positions def _snake_case ( _snake_case : list[list[int]] ) -> bool: '''simple docstring''' return not any(elem == 0 for row in board for elem in row ) def _snake_case ( _snake_case : list[list[int]] , _snake_case : tuple[int, int] , _snake_case : int ) -> bool: '''simple docstring''' if is_complete(_snake_case ): return True for position in get_valid_pos(_snake_case , len(_snake_case ) ): _A , _A = position if board[y][x] == 0: _A = curr + 1 if open_knight_tour_helper(_snake_case , _snake_case , curr + 1 ): return True _A = 0 return False def _snake_case ( _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [[0 for i in range(_snake_case )] for j in range(_snake_case )] for i in range(_snake_case ): for j in range(_snake_case ): _A = 1 if open_knight_tour_helper(_snake_case , (i, j) , 1 ): return board _A = 0 _A = F'''Open Kight Tour cannot be performed on a board of size {n}''' raise ValueError(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class lowercase_ ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self : int ): _A = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=_UpperCAmelCase ).to(_UpperCAmelCase ) _A = AutoTokenizer.from_pretrained('google/mt5-small' ) _A = tokenizer('Hello there' , return_tensors='pt' ).input_ids _A = tokenizer('Hi I am' , return_tensors='pt' ).input_ids _A = model(input_ids.to(_UpperCAmelCase ) , labels=labels.to(_UpperCAmelCase ) ).loss _A = -(labels.shape[-1] * loss.item()) _A = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
"""simple docstring""" import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : int , *_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : List[Any] ): super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) _A = eval_examples _A = post_process_function def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str = "eval" ): _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(_UpperCAmelCase ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) else: _A = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_UpperCAmelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCAmelCase ) return metrics def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str = "test" ): _A = self.get_test_dataloader(_UpperCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. 
_A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions , 'predict' ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCAmelCase )
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''huggingface/time-series-transformer-tourism-monthly''': ( '''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json''' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Optional[int] = '''time_series_transformer''' UpperCAmelCase : Any = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self : List[Any] , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : str = "student_t" , _UpperCAmelCase : str = "nll" , _UpperCAmelCase : int = 1 , _UpperCAmelCase : List[int] = [1, 2, 3, 4, 5, 6, 7] , _UpperCAmelCase : Optional[Union[str, bool]] = "mean" , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : int = 32 , _UpperCAmelCase : int = 32 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , _UpperCAmelCase : str = "gelu" , _UpperCAmelCase : int = 64 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : int = 100 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : List[Any]=True , **_UpperCAmelCase : Optional[int] , ): # time series specific configuration _A = prediction_length _A = context_length or prediction_length _A = distribution_output _A = loss _A = input_size _A = num_time_features _A = lags_sequence _A = scaling _A = num_dynamic_real_features _A = num_static_real_features _A = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(_UpperCAmelCase ) != num_static_categorical_features: raise ValueError( 'The cardinality should be a list of the same length as `num_static_categorical_features`' ) _A = cardinality else: _A = [0] if embedding_dimension and num_static_categorical_features > 0: if len(_UpperCAmelCase ) != num_static_categorical_features: raise ValueError( 'The embedding dimension should be a list of the same length as `num_static_categorical_features`' ) _A = embedding_dimension else: _A = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] _A = num_parallel_samples # Transformer architecture configuration _A = input_size * len(_UpperCAmelCase ) + self._number_of_features _A = d_model _A = encoder_attention_heads _A = decoder_attention_heads _A = encoder_ffn_dim _A = decoder_ffn_dim _A = encoder_layers _A = decoder_layers _A = dropout _A = attention_dropout _A = activation_dropout _A = encoder_layerdrop _A = decoder_layerdrop _A = activation_function _A = init_std _A = use_cache super().__init__(is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase ) @property def lowerCAmelCase_ ( self : Tuple ): return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ) -> bool: '''simple docstring''' return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" def _snake_case ( _snake_case : int ) -> bool: '''simple docstring''' if num < 0: return False _A = num _A = 0 while num > 0: _A = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) _A = Vector() def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(_UpperCAmelCase ) , '(0,0,0,0,0,1)' ) def lowerCAmelCase_ ( self : Optional[int] ): _A = Vector([1, 2, 3, 4] ) self.assertEqual(len(_UpperCAmelCase ) , 4 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2] ) _A = Vector([1, 2, 3, 4, 5] ) _A = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) _A = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def lowerCAmelCase_ ( self : str ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) _A = Vector([2, -1, 4] ) # for test of dot product _A = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' ) self.assertEqual((a * b) , 0 ) def lowerCAmelCase_ ( self : Dict ): self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 ) def lowerCAmelCase_ ( self : Tuple ): self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , _UpperCAmelCase , _UpperCAmelCase ) ) , '(3,4,7)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 0, 0, 0, 0, 0] ) _A = x.copy() self.assertEqual(str(_UpperCAmelCase ) , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(_UpperCAmelCase ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : str ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) _A = Vector([1, 2, 3] ) self.assertEqual('(14,32,50)' , str(a * x ) ) self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) 
a.change_component(0 , 2 , 5 ) self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : List[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) ) def lowerCAmelCase_ ( self : int ): self.assertEqual( '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
"""simple docstring""" import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(__lowerCAmelCase ) , '''Tatoeba directory does not exist.''' ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : Optional[Any] ): _A = tempfile.mkdtemp() return TatoebaConverter(save_dir=_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Optional[int] ): self.resolver.convert_models(['heb-eng'] ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): _A , _A = self.resolver.write_model_card('opus-mt-he-en' , dry_run=_UpperCAmelCase ) assert mmeta["long_pair"] == "heb-eng"
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''', } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : int = '''xlnet''' UpperCAmelCase : List[Any] = ['''mems'''] UpperCAmelCase : Any = { '''n_token''': '''vocab_size''', # Backward compatibility '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , _UpperCAmelCase : Dict=32_000 , _UpperCAmelCase : List[str]=1_024 , _UpperCAmelCase : Any=24 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : Union[str, Any]=4_096 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Any=True , _UpperCAmelCase : str="bi" , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1E-1_2 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=512 , _UpperCAmelCase : Dict=None , _UpperCAmelCase : int=True , _UpperCAmelCase : int=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : int=-1 , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Union[str, Any]="last" , _UpperCAmelCase : int=True , _UpperCAmelCase : str="tanh" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Optional[Any]=5 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Dict=2 , **_UpperCAmelCase : int , ): _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( 'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`' ' instead.' , _UpperCAmelCase , ) _A = kwargs['use_cache'] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) @property def lowerCAmelCase_ ( self : Tuple ): logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] ): # Message copied from Transformer-XL documentation raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Optional[Any] = '''mobilenet_v1''' def __init__( self : List[str] , _UpperCAmelCase : Tuple=3 , _UpperCAmelCase : List[str]=224 , _UpperCAmelCase : Union[str, Any]=1.0 , _UpperCAmelCase : str=8 , _UpperCAmelCase : int="relu6" , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Dict=0.999 , _UpperCAmelCase : Any=0.02 , _UpperCAmelCase : Any=0.001 , **_UpperCAmelCase : Dict , ): super().__init__(**_UpperCAmelCase ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) _A = num_channels _A = image_size _A = depth_multiplier _A = min_depth _A = hidden_act _A = tf_padding _A = classifier_dropout_prob _A = initializer_range _A = layer_norm_eps class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Any = version.parse('''1.11''' ) @property def lowerCAmelCase_ ( self : Dict ): return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def lowerCAmelCase_ ( self : List[str] ): if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def lowerCAmelCase_ ( self : Tuple ): return 1E-4
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed a = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def _snake_case ( _snake_case : Tuple ) -> Dict: '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def _snake_case ( _snake_case : str , _snake_case : List[Any] ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False elif args.student_type == "gpt2": _A = False def _snake_case ( _snake_case : str , _snake_case : int ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False def _snake_case ( ) -> Tuple: '''simple docstring''' _A = argparse.ArgumentParser(description='Training' ) parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' ) parser.add_argument( '--dump_path' , type=_snake_case , required=_snake_case , help='The output directory (log, checkpoints, parameters, etc.)' ) parser.add_argument( '--data_file' , type=_snake_case , required=_snake_case , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , ) parser.add_argument( '--student_type' , type=_snake_case , choices=['distilbert', 'roberta', 'gpt2'] , required=_snake_case , help='The student type (DistilBERT, RoBERTa).' , ) parser.add_argument('--student_config' , type=_snake_case , required=_snake_case , help='Path to the student configuration.' ) parser.add_argument( '--student_pretrained_weights' , default=_snake_case , type=_snake_case , help='Load student initialization checkpoint.' ) parser.add_argument( '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=_snake_case , help='Teacher type (BERT, RoBERTa).' ) parser.add_argument('--teacher_name' , type=_snake_case , required=_snake_case , help='The teacher model.' ) parser.add_argument('--temperature' , default=2.0 , type=_snake_case , help='Temperature for the softmax temperature.' 
) parser.add_argument( '--alpha_ce' , default=0.5 , type=_snake_case , help='Linear weight for the distillation loss. Must be >=0.' ) parser.add_argument( '--alpha_mlm' , default=0.0 , type=_snake_case , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , ) parser.add_argument('--alpha_clm' , default=0.5 , type=_snake_case , help='Linear weight for the CLM loss. Must be >=0.' ) parser.add_argument('--alpha_mse' , default=0.0 , type=_snake_case , help='Linear weight of the MSE loss. Must be >=0.' ) parser.add_argument( '--alpha_cos' , default=0.0 , type=_snake_case , help='Linear weight of the cosine embedding loss. Must be >=0.' ) parser.add_argument( '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' ) parser.add_argument( '--mlm_mask_prop' , default=0.15 , type=_snake_case , help='Proportion of tokens for which we need to make a prediction.' , ) parser.add_argument('--word_mask' , default=0.8 , type=_snake_case , help='Proportion of tokens to mask out.' ) parser.add_argument('--word_keep' , default=0.1 , type=_snake_case , help='Proportion of tokens to keep.' ) parser.add_argument('--word_rand' , default=0.1 , type=_snake_case , help='Proportion of tokens to randomly replace.' ) parser.add_argument( '--mlm_smoothing' , default=0.7 , type=_snake_case , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , ) parser.add_argument('--token_counts' , type=_snake_case , help='The token counts in the data_file for MLM.' ) parser.add_argument( '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , ) parser.add_argument( '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , ) parser.add_argument( '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , ) parser.add_argument('--n_epoch' , type=_snake_case , default=3 , help='Number of pass on the whole dataset.' ) parser.add_argument('--batch_size' , type=_snake_case , default=5 , help='Batch size (for each process).' ) parser.add_argument( '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , ) parser.add_argument( '--gradient_accumulation_steps' , type=_snake_case , default=50 , help='Gradient accumulation for larger training batches.' , ) parser.add_argument('--warmup_prop' , default=0.05 , type=_snake_case , help='Linear warmup proportion.' ) parser.add_argument('--weight_decay' , default=0.0 , type=_snake_case , help='Weight decay if we apply some.' ) parser.add_argument('--learning_rate' , default=5E-4 , type=_snake_case , help='The initial learning rate for Adam.' ) parser.add_argument('--adam_epsilon' , default=1E-6 , type=_snake_case , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , default=5.0 , type=_snake_case , help='Max gradient norm.' ) parser.add_argument('--initializer_range' , default=0.02 , type=_snake_case , help='Random initialization range.' 
) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=_snake_case , default='O1' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_gpu' , type=_snake_case , default=1 , help='Number of GPUs in the node.' ) parser.add_argument('--local_rank' , type=_snake_case , default=-1 , help='Distributed training - Local rank' ) parser.add_argument('--seed' , type=_snake_case , default=56 , help='Random seed' ) parser.add_argument('--log_interval' , type=_snake_case , default=5_00 , help='Tensorboard logging interval.' ) parser.add_argument('--checkpoint_interval' , type=_snake_case , default=40_00 , help='Checkpoint interval.' ) _A = parser.parse_args() sanity_checks(_snake_case ) # ARGS # init_gpu_params(_snake_case ) set_seed(_snake_case ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' ' itUse `--force` if you want to overwrite it' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f: json.dump(vars(_snake_case ) , _snake_case , indent=4 ) git_log(args.dump_path ) _A , _A , _A = MODEL_CLASSES[args.student_type] _A , _A , _A = MODEL_CLASSES[args.teacher_type] # TOKENIZER # _A = teacher_tokenizer_class.from_pretrained(args.teacher_name ) _A = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): _A = tokenizer.all_special_tokens.index(_snake_case ) _A = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) _A = special_tok_ids _A = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , 'rb' ) as fp: _A = pickle.load(_snake_case ) if args.mlm: logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , 'rb' ) as fp: _A = pickle.load(_snake_case ) _A = np.maximum(_snake_case , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): _A = 0.0 # do not predict special tokens _A = torch.from_numpy(_snake_case ) else: _A = None _A = LmSeqsDataset(params=_snake_case , data=_snake_case ) logger.info('Data loader created.' ) # STUDENT # logger.info(F'''Loading student config from {args.student_config}''' ) _A = student_config_class.from_pretrained(args.student_config ) _A = True if args.student_pretrained_weights is not None: logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' ) _A = student_model_class.from_pretrained(args.student_pretrained_weights , config=_snake_case ) else: _A = student_model_class(_snake_case ) if args.n_gpu > 0: student.to(F'''cuda:{args.local_rank}''' ) logger.info('Student loaded.' 
) # TEACHER # _A = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_snake_case ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_snake_case , _snake_case ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_snake_case , _snake_case ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() _A = Distiller( params=_snake_case , dataset=_snake_case , token_probs=_snake_case , student=_snake_case , teacher=_snake_case ) distiller.train() logger.info('Let\'s go get some drinks.' ) if __name__ == "__main__": main()
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() a = logging.get_logger(__name__) def _snake_case ( _snake_case : Any , _snake_case : Tuple=False ) -> List[Any]: '''simple docstring''' _A = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ('cls_token', 'vit.embeddings.cls_token'), ('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'), ('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'), ('pos_embed', 'vit.embeddings.position_embeddings'), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _A = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('norm.weight', 'vit.layernorm.weight'), ('norm.bias', 'vit.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def _snake_case ( _snake_case : str , _snake_case : List[Any] , _snake_case : List[Any]=False ) -> Optional[int]: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: _A = '' else: _A = 'vit.' 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _A = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) _A = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _A = in_proj_weight[ : config.hidden_size, : ] _A = in_proj_bias[: config.hidden_size] _A = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _A = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _A = in_proj_weight[ -config.hidden_size :, : ] _A = in_proj_bias[-config.hidden_size :] def _snake_case ( _snake_case : Optional[int] ) -> Tuple: '''simple docstring''' _A = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(_snake_case , _snake_case ) def _snake_case ( _snake_case : str , _snake_case : int , _snake_case : Union[str, Any] ) -> int: '''simple docstring''' _A = dct.pop(_snake_case ) _A = val def _snake_case ( ) -> int: '''simple docstring''' _A = 'http://images.cocodataset.org/val2017/000000039769.jpg' _A = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ) return im @torch.no_grad() def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Any=True ) -> str: '''simple docstring''' _A = ViTConfig() # patch_size if model_name[-1] == "8": _A = 8 # set labels if required if not base_model: _A = 10_00 _A = 'huggingface/label-files' _A = 'imagenet-1k-id2label.json' _A = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='dataset' ) , 'r' ) ) _A = {int(_snake_case ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: _A = 3_84 _A = 15_36 _A = 12 _A = 6 # load original model from torch hub _A = torch.hub.load('facebookresearch/dino:main' , _snake_case ) original_model.eval() # load state_dict of original model, remove and rename some keys _A = original_model.state_dict() if base_model: remove_classification_head_(_snake_case ) _A = create_rename_keys(_snake_case , base_model=_snake_case ) for src, dest in rename_keys: rename_key(_snake_case , _snake_case , _snake_case ) read_in_q_k_v(_snake_case , _snake_case , _snake_case ) # load HuggingFace model if base_model: _A = ViTModel(_snake_case , add_pooling_layer=_snake_case ).eval() else: _A = ViTForImageClassification(_snake_case ).eval() model.load_state_dict(_snake_case ) # Check outputs on an image, prepared by ViTImageProcessor _A = ViTImageProcessor() _A = image_processor(images=prepare_img() , return_tensors='pt' ) _A = encoding['pixel_values'] _A = model(_snake_case ) if base_model: _A = original_model(_snake_case ) assert torch.allclose(_snake_case , outputs.last_hidden_state[:, 0, :] , atol=1E-1 ) else: _A = original_model(_snake_case ) assert logits.shape == outputs.logits.shape assert torch.allclose(_snake_case , outputs.logits , atol=1E-3 ) Path(_snake_case ).mkdir(exist_ok=_snake_case ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_snake_case ) if __name__ == "__main__": a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''dino_vitb16''', type=str, help='''Name of the model trained with DINO you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch 
model directory.''' ) parser.add_argument( '''--base_model''', action='''store_true''', help='''Whether to only convert the base model (no projection head weights).''', ) parser.set_defaults(base_model=True) a = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
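# --- The trickiest conversion step above is read_in_q_k_v: timm/DINO
# checkpoints store the attention projections as one fused qkv matrix of shape
# (3 * hidden, hidden) that must be sliced into query, key, value in that
# order. A self-contained sketch of that slicing on a dummy tensor:
import torch

hidden = 8
qkv_weight = torch.randn(3 * hidden, hidden)   # fused [q; k; v]
q = qkv_weight[:hidden, :]
k = qkv_weight[hidden : 2 * hidden, :]
v = qkv_weight[-hidden:, :]
# Re-concatenating in q, k, v order must reproduce the fused matrix.
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)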
"""simple docstring""" from manim import * class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Dict ): _A = Rectangle(height=0.5 , width=0.5 ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _A = Rectangle(height=0.25 , width=0.25 ) _A = [mem.copy() for i in range(6 )] _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('CPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(4 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('GPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Model' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(_UpperCAmelCase ) _A = [] _A = [] for i, rect in enumerate(_UpperCAmelCase ): _A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 ) target.move_to(_UpperCAmelCase ) model_arr.append(_UpperCAmelCase ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(_UpperCAmelCase ) self.add(*_UpperCAmelCase , *_UpperCAmelCase ) _A = [meta_mem.copy() for i in range(6 )] _A = [meta_mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Disk' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) disk.move_to([-4, -1.25, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _A = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(_UpperCAmelCase ) _A = MarkupText( F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase ) ) _A = Square(0.3 ) input.set_fill(_UpperCAmelCase , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 ) self.play(Write(_UpperCAmelCase ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 ) self.play(MoveToTarget(_UpperCAmelCase ) ) self.play(FadeOut(_UpperCAmelCase ) ) _A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 ) a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 ) model_cpu_arr[0].generate_target() 
model_cpu_arr[0].target.move_to(gpu_rect[0] ) _A = MarkupText( F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) ) _A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) _A = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) _A = AnimationGroup( FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 ) self.play(_UpperCAmelCase ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: _A = 0.7 self.play( Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) _A = a_c _A = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , ) _A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) ) self.wait()
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : List[str] = '''timm_backbone''' def __init__( self : Optional[int] , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : int=3 , _UpperCAmelCase : str=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : Union[str, Any] , ): super().__init__(**_UpperCAmelCase ) _A = backbone _A = num_channels _A = features_only _A = use_pretrained_backbone _A = True _A = out_indices if out_indices is not None else (-1,)
"""simple docstring""" def _snake_case ( _snake_case : list , _snake_case : int = 0 ) -> list: '''simple docstring''' _A = length or len(_snake_case ) _A = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: _A , _A = list_data[i + 1], list_data[i] _A = True return list_data if not swapped else bubble_sort(_snake_case , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import string import numpy def _snake_case ( _snake_case : int , _snake_case : int ) -> int: '''simple docstring''' return b if a == 0 else greatest_common_divisor(b % a , _snake_case ) class lowercase_ : '''simple docstring''' UpperCAmelCase : Optional[int] = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) UpperCAmelCase : int = numpy.vectorize(lambda __lowerCAmelCase : x % 36 ) UpperCAmelCase : Union[str, Any] = numpy.vectorize(__lowerCAmelCase ) def __init__( self : List[Any] , _UpperCAmelCase : numpy.ndarray ): _A = self.modulus(_UpperCAmelCase ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key _A = encrypt_key.shape[0] def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : str ): return self.key_string.index(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : int ): return self.key_string[round(_UpperCAmelCase )] def lowerCAmelCase_ ( self : List[str] ): _A = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: _A = det % len(self.key_string ) _A = len(self.key_string ) if greatest_common_divisor(_UpperCAmelCase , len(self.key_string ) ) != 1: _A = ( F'''determinant modular {req_l} of encryption key({det}) ''' F'''is not co prime w.r.t {req_l}.\nTry another key.''' ) raise ValueError(_UpperCAmelCase ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : str ): _A = [char for char in text.upper() if char in self.key_string] _A = chars[-1] while len(_UpperCAmelCase ) % self.break_key != 0: chars.append(_UpperCAmelCase ) return "".join(_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : str ): _A = self.process_text(text.upper() ) _A = '' for i in range(0 , len(_UpperCAmelCase ) - self.break_key + 1 , self.break_key ): _A = text[i : i + self.break_key] _A = [self.replace_letters(_UpperCAmelCase ) for char in batch] _A = numpy.array([vec] ).T _A = self.modulus(self.encrypt_key.dot(_UpperCAmelCase ) ).T.tolist()[ 0 ] _A = ''.join( self.replace_digits(_UpperCAmelCase ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def lowerCAmelCase_ ( self : Dict ): _A = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: _A = det % len(self.key_string ) _A = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: _A = i break _A = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : str ): _A = self.make_decrypt_key() _A = self.process_text(text.upper() ) _A = '' for i in range(0 , len(_UpperCAmelCase ) - self.break_key + 1 , self.break_key ): _A = text[i : i + self.break_key] _A = [self.replace_letters(_UpperCAmelCase ) for char in batch] _A = numpy.array([vec] ).T _A = self.modulus(decrypt_key.dot(_UpperCAmelCase ) ).T.tolist()[0] _A = ''.join( self.replace_digits(_UpperCAmelCase ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def _snake_case ( ) -> None: '''simple docstring''' _A = int(input('Enter the order of the encryption key: ' ) ) _A = [] print('Enter each row of the encryption key with space separated integers' ) for _ in range(_snake_case ): _A = [int(_snake_case ) for x in input().split()] hill_matrix.append(_snake_case ) _A = HillCipher(numpy.array(_snake_case ) ) print('Would you like to encrypt or 
decrypt some text? (1 or 2)' ) _A = input('\n1. Encrypt\n2. Decrypt\n' ) if option == "1": _A = input('What text would you like to encrypt?: ' ) print('Your encrypted text is:' ) print(hc.encrypt(_snake_case ) ) elif option == "2": _A = input('What text would you like to decrypt?: ' ) print('Your decrypted text is:' ) print(hc.decrypt(_snake_case ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
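# --- Round-trip sketch of the interactive flow above, driven programmatically.
# Assumptions: the class `lowercase_` is renamed to `HillCipher` (as main()
# already expects) and its garbled `_A` locals are restored. The 2x2 key is
# illustrative; its determinant 7 is coprime to 36, as check_determinant requires.
import numpy

key = numpy.array([[2, 5], [1, 6]])   # det = 2*6 - 5*1 = 7, gcd(7, 36) = 1
hc = HillCipher(key)
ciphertext = hc.encrypt("HELLO")      # padded with the last char to a multiple of 2
print(ciphertext, "->", hc.decrypt(ciphertext))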
"""simple docstring""" import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Any = ['''input_values''', '''attention_mask'''] def __init__( self : Dict , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 16_000 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 80 , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : str = "hann_window" , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 7_600 , _UpperCAmelCase : float = 1E-1_0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , **_UpperCAmelCase : List[Any] , ): super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase ) _A = do_normalize _A = return_attention_mask _A = num_mel_bins _A = hop_length _A = win_length _A = win_function _A = frame_signal_scale _A = fmin _A = fmax _A = mel_floor _A = reduction_factor _A = win_length * sampling_rate // 1_000 _A = hop_length * sampling_rate // 1_000 _A = optimal_fft_length(self.sample_size ) _A = (self.n_fft // 2) + 1 _A = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase ) _A = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , ) if frame_signal_scale != 1.0: warnings.warn( 'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) if reduction_factor != 2.0: warnings.warn( 'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def lowerCAmelCase_ ( _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : float = 0.0 ): if attention_mask is not None: _A = np.array(_UpperCAmelCase , np.intaa ) _A = [] for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1 ) ): _A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: _A = padding_value normed_input_values.append(_UpperCAmelCase ) else: _A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , ): _A = spectrogram( _UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , ) return log_mel_spec.T def __call__( self : int , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , 
_UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ): if audio is None and audio_target is None: raise ValueError('You must provide either `audio` or `audio_target` values.' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if audio is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) else: _A = None if audio_target is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) if inputs is None: return inputs_target else: _A = inputs_target['input_values'] _A = inputs_target.get('attention_mask' ) if decoder_attention_mask is not None: _A = decoder_attention_mask return inputs def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ): _A = isinstance(_UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _A = is_batched_numpy or ( isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ): _A = np.asarray(_UpperCAmelCase , dtype=np.floataa ) elif isinstance(_UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): _A = speech.astype(np.floataa ) # always return batch if not is_batched: _A = [speech] # needed to make pad() work on spectrogram inputs _A = self.feature_size # convert into correct format for padding if is_target: _A = [self._extract_mel_features(_UpperCAmelCase ) for waveform in speech] _A = BatchFeature({'input_values': features} ) _A = self.num_mel_bins else: _A = BatchFeature({'input_values': speech} ) _A = self.pad( _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , ) _A = feature_size_hack # convert input values to correct format _A = padded_inputs['input_values'] if not isinstance(input_values[0] , np.ndarray ): _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for array in input_values] elif ( not 
isinstance(_UpperCAmelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): _A = [array.astype(np.floataa ) for array in input_values] elif isinstance(_UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): _A = input_values.astype(np.floataa ) # convert attention_mask to correct format _A = padded_inputs.get('attention_mask' ) if attention_mask is not None: _A = [np.asarray(_UpperCAmelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: _A = ( attention_mask if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) _A = self.zero_mean_unit_var_norm( padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value ) if return_tensors is not None: _A = padded_inputs.convert_to_tensors(_UpperCAmelCase ) return padded_inputs def lowerCAmelCase_ ( self : Any ): _A = super().to_dict() # Don't serialize these as they are derived from the other properties. _A = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs'] for name in names: if name in output: del output[name] return output
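# --- Usage sketch (assumption: the class above is transformers'
# SpeechT5FeatureExtractor). Per the branches in _process_audio, `audio`
# yields padded raw waveforms while `audio_target` yields 80-bin log-mel
# spectrogram targets.
import numpy as np
from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()
waveform = np.random.randn(16_000).astype(np.float32)    # 1 s of audio at 16 kHz
inputs = extractor(audio=waveform, sampling_rate=16_000, return_tensors="np")
targets = extractor(audio_target=waveform, sampling_rate=16_000, return_tensors="np")
print(inputs["input_values"].shape)    # (1, 16000) padded waveform
print(targets["input_values"].shape)   # (1, n_frames, 80) log-mel features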
"""simple docstring""" from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker a = '''CompVis/stable-diffusion-v1-1''' a = '''CompVis/stable-diffusion-v1-2''' a = '''CompVis/stable-diffusion-v1-3''' a = '''CompVis/stable-diffusion-v1-4''' class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Tuple , _UpperCAmelCase : AutoencoderKL , _UpperCAmelCase : CLIPTextModel , _UpperCAmelCase : CLIPTokenizer , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _UpperCAmelCase : StableDiffusionSafetyChecker , _UpperCAmelCase : CLIPImageProcessor , _UpperCAmelCase : bool = True , ): super()._init_() _A = StableDiffusionPipeline.from_pretrained(_UpperCAmelCase ) _A = StableDiffusionPipeline.from_pretrained(_UpperCAmelCase ) _A = StableDiffusionPipeline.from_pretrained(_UpperCAmelCase ) _A = StableDiffusionPipeline( vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , requires_safety_checker=_UpperCAmelCase , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def lowerCAmelCase_ ( self : int ): return {k: getattr(self , _UpperCAmelCase ) for k in self.config.keys() if not k.startswith('_' )} def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _A = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): self.enable_attention_slicing(_UpperCAmelCase ) @torch.no_grad() def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 50 , _UpperCAmelCase : float = 7.5 , _UpperCAmelCase : Optional[Union[str, List[str]]] = None , _UpperCAmelCase : Optional[int] = 1 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : Optional[torch.Generator] = None , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _UpperCAmelCase : int = 1 , **_UpperCAmelCase : Dict , ): return self.pipea( prompt=_UpperCAmelCase , height=_UpperCAmelCase , width=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , latents=_UpperCAmelCase , output_type=_UpperCAmelCase , return_dict=_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=_UpperCAmelCase , **_UpperCAmelCase , ) @torch.no_grad() def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int 
= 50 , _UpperCAmelCase : float = 7.5 , _UpperCAmelCase : Optional[Union[str, List[str]]] = None , _UpperCAmelCase : Optional[int] = 1 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : Optional[torch.Generator] = None , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _UpperCAmelCase : int = 1 , **_UpperCAmelCase : List[str] , ): return self.pipea( prompt=_UpperCAmelCase , height=_UpperCAmelCase , width=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , latents=_UpperCAmelCase , output_type=_UpperCAmelCase , return_dict=_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=_UpperCAmelCase , **_UpperCAmelCase , ) @torch.no_grad() def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 50 , _UpperCAmelCase : float = 7.5 , _UpperCAmelCase : Optional[Union[str, List[str]]] = None , _UpperCAmelCase : Optional[int] = 1 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : Optional[torch.Generator] = None , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _UpperCAmelCase : int = 1 , **_UpperCAmelCase : int , ): return self.pipea( prompt=_UpperCAmelCase , height=_UpperCAmelCase , width=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , latents=_UpperCAmelCase , output_type=_UpperCAmelCase , return_dict=_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=_UpperCAmelCase , **_UpperCAmelCase , ) @torch.no_grad() def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 50 , _UpperCAmelCase : float = 7.5 , _UpperCAmelCase : Optional[Union[str, List[str]]] = None , _UpperCAmelCase : Optional[int] = 1 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : Optional[torch.Generator] = None , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _UpperCAmelCase : int = 1 , **_UpperCAmelCase : List[Any] , ): return self.pipea( prompt=_UpperCAmelCase , height=_UpperCAmelCase , width=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , latents=_UpperCAmelCase , output_type=_UpperCAmelCase , return_dict=_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=_UpperCAmelCase , **_UpperCAmelCase , ) @torch.no_grad() def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 50 , _UpperCAmelCase : float = 7.5 , _UpperCAmelCase : Optional[Union[str, List[str]]] = None , _UpperCAmelCase : Optional[int] = 1 , _UpperCAmelCase : float = 
0.0 , _UpperCAmelCase : Optional[torch.Generator] = None , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _UpperCAmelCase : int = 1 , **_UpperCAmelCase : int , ): _A = 'cuda' if torch.cuda.is_available() else 'cpu' self.to(_UpperCAmelCase ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' ) # Get first result from Stable Diffusion Checkpoint v1.1 _A = self.textaimg_sda_a( prompt=_UpperCAmelCase , height=_UpperCAmelCase , width=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , latents=_UpperCAmelCase , output_type=_UpperCAmelCase , return_dict=_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=_UpperCAmelCase , **_UpperCAmelCase , ) # Get first result from Stable Diffusion Checkpoint v1.2 _A = self.textaimg_sda_a( prompt=_UpperCAmelCase , height=_UpperCAmelCase , width=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , latents=_UpperCAmelCase , output_type=_UpperCAmelCase , return_dict=_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=_UpperCAmelCase , **_UpperCAmelCase , ) # Get first result from Stable Diffusion Checkpoint v1.3 _A = self.textaimg_sda_a( prompt=_UpperCAmelCase , height=_UpperCAmelCase , width=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , latents=_UpperCAmelCase , output_type=_UpperCAmelCase , return_dict=_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=_UpperCAmelCase , **_UpperCAmelCase , ) # Get first result from Stable Diffusion Checkpoint v1.4 _A = self.textaimg_sda_a( prompt=_UpperCAmelCase , height=_UpperCAmelCase , width=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , latents=_UpperCAmelCase , output_type=_UpperCAmelCase , return_dict=_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=_UpperCAmelCase , **_UpperCAmelCase , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
315
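The record above stitches four Stable Diffusion v1.x checkpoints into one comparison pipeline. As a hedged reference, here is a minimal sketch of the same idea written directly against the public diffusers API rather than the normalized class names in the dump; the four checkpoint ids come from the constants the file defines, and the prompt and seed are hypothetical.

import torch
from diffusers import StableDiffusionPipeline

checkpoints = [
    "CompVis/stable-diffusion-v1-1",
    "CompVis/stable-diffusion-v1-2",
    "CompVis/stable-diffusion-v1-3",
    "CompVis/stable-diffusion-v1-4",
]
prompt = "a photograph of an astronaut riding a horse"  # hypothetical example prompt
device = "cuda" if torch.cuda.is_available() else "cpu"

images = []
for ckpt in checkpoints:
    pipe = StableDiffusionPipeline.from_pretrained(ckpt).to(device)
    # Reuse the same seed so differences come from the checkpoints, not the noise.
    generator = torch.Generator(device=device).manual_seed(0)
    images.append(pipe(prompt, generator=generator).images[0])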
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : int , _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [] create_all_state(1 , _snake_case , _snake_case , [] , _snake_case ) return result def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : list[int] , _snake_case : list[list[int]] , ) -> None: '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(_snake_case , total_number - level + 2 ): current_list.append(_snake_case ) create_all_state(i + 1 , _snake_case , level - 1 , _snake_case , _snake_case ) current_list.pop() def _snake_case ( _snake_case : list[list[int]] ) -> None: '''simple docstring''' for i in total_list: print(*_snake_case ) if __name__ == "__main__": a = 4 a = 2 a = generate_all_combinations(n, k) print_all_state(total_list)
315
1
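The backtracking in the record above enumerates k-element combinations of 1..n in lexicographic order, which makes it easy to cross-check against the standard library; a small sketch (the function name mirrors the record's __main__ block):

from itertools import combinations

def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    # Reference result: all k-element combinations of 1..n, lexicographic order.
    return [list(c) for c in combinations(range(1, n + 1), k)]

# Matches what the recursive version prints for n=4, k=2.
assert generate_all_combinations(4, 2) == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]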
"""simple docstring""" from __future__ import annotations import typing from collections.abc import Iterable import numpy as np a = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 a = typing.Union[np.floataa, int, float] # noqa: UP007 def _snake_case ( _snake_case : Vector , _snake_case : Vector ) -> VectorOut: '''simple docstring''' return np.sqrt(np.sum((np.asarray(_snake_case ) - np.asarray(_snake_case )) ** 2 ) ) def _snake_case ( _snake_case : Vector , _snake_case : Vector ) -> VectorOut: '''simple docstring''' return sum((va - va) ** 2 for va, va in zip(_snake_case , _snake_case ) ) ** (1 / 2) if __name__ == "__main__": def _snake_case ( ) -> None: '''simple docstring''' from timeit import timeit print('Without Numpy' ) print( timeit( 'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=1_00_00 , globals=globals() , ) ) print('With Numpy' ) print( timeit( 'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=1_00_00 , globals=globals() , ) ) benchmark()
315
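A quick sanity check that the numpy and pure-Python distance variants above agree (both helpers restated from the record with the mangled names spelled out):

import math
import numpy as np

def euclidean_distance(v1, v2):
    return np.sqrt(np.sum((np.asarray(v1) - np.asarray(v2)) ** 2))

def euclidean_distance_no_np(v1, v2):
    return sum((a - b) ** 2 for a, b in zip(v1, v2)) ** (1 / 2)

# Both evaluate to sqrt(27) ~= 5.196 for this pair.
assert math.isclose(
    float(euclidean_distance([1, 2, 3], [4, 5, 6])),
    euclidean_distance_no_np([1, 2, 3], [4, 5, 6]),
)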
"""simple docstring""" def _snake_case ( _snake_case : int = 10_00 ) -> int: '''simple docstring''' return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
315
1
"""simple docstring""" from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass a = (3, 9, -11, 0, 7, 5, 1, -1) a = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : Node | None class lowercase_ : '''simple docstring''' def __init__( self : int , _UpperCAmelCase : Iterable[int] ): _A = None for i in sorted(_UpperCAmelCase , reverse=_UpperCAmelCase ): _A = Node(_UpperCAmelCase , self.head ) def __iter__( self : Union[str, Any] ): _A = self.head while node: yield node.data _A = node.next_node def __len__( self : Optional[int] ): return sum(1 for _ in self ) def __str__( self : Dict ): return " -> ".join([str(_UpperCAmelCase ) for node in self] ) def _snake_case ( _snake_case : SortedLinkedList , _snake_case : SortedLinkedList ) -> SortedLinkedList: '''simple docstring''' return SortedLinkedList(list(_snake_case ) + list(_snake_case ) ) if __name__ == "__main__": import doctest doctest.testmod() a = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
315
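The merge above is correct because each list is sorted on construction (descending sort, prepending at the head yields ascending order), so merging reduces to concatenating the two iterables and rebuilding. A plain-list sketch of that behaviour using the record's test data:

# Equivalent behaviour with plain lists: construction sorts, merge re-sorts the union.
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)

merged = sorted(list(test_data_odd) + list(test_data_even))
assert merged[0] == -11 and merged[-1] == 10
print(" -> ".join(str(x) for x in merged))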
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[str] ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple=True ): _A = () for resnet, attn in zip(self.resnets , self.attentions ): _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[Any] ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str]=True ): _A = () for resnet in self.resnets: _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels 
+ res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int]=True ): for resnet in self.resnets: # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Dict ): # there is always at least one resnet _A = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] _A = [] for _ in range(self.num_layers ): _A = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) 
resnets.append(_UpperCAmelCase ) _A = resnets _A = attentions def __call__( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=True ): _A = self.resnets[0](_UpperCAmelCase , _UpperCAmelCase ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) return hidden_states
315
1
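The down-blocks above follow a resnet-then-downsample pattern. Below is a self-contained flax.linen sketch of that shape contract; the channel counts and spatial sizes are arbitrary assumptions, and the conv body merely stands in for the full resnet block:

import jax
import jax.numpy as jnp
import flax.linen as nn

class TinyDownBlock(nn.Module):
    out_channels: int

    @nn.compact
    def __call__(self, hidden_states):
        # Conv body standing in for the resnet block, then a stride-2 conv as downsampler.
        h = nn.Conv(self.out_channels, kernel_size=(3, 3), padding="SAME")(hidden_states)
        h = nn.swish(h)
        return nn.Conv(self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding="SAME")(h)

x = jnp.ones((1, 32, 32, 4))  # NHWC layout, as in the Flax blocks above
model = TinyDownBlock(out_channels=8)
params = model.init(jax.random.PRNGKey(0), x)
assert model.apply(params, x).shape == (1, 16, 16, 8)  # spatial size halved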
"""simple docstring""" import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def _snake_case ( _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Optional[int] ) -> List[Any]: '''simple docstring''' _A = AutoConfig.from_pretrained(_snake_case ) _A = FlaxAutoModelForSeqaSeqLM.from_config(config=_snake_case ) _A = checkpoints.load_tax_checkpoint(_snake_case ) _A = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp'] if config.model_type == "t5": _A = 'SelfAttention' if config.model_type == "longt5" and config.encoder_attention_type == "local": _A = 'LocalSelfAttention' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _A = 'TransientGlobalSelfAttention' else: raise ValueError( 'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`' ' attribute with a value from [\'local\', \'transient-global].' ) # Encoder for layer_index in range(config.num_layers ): _A = F'''layers_{str(_snake_case )}''' # Self-Attention _A = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel'] _A = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel'] _A = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel'] _A = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel'] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _A = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale'] # Layer Normalization _A = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale'] if split_mlp_wi: _A = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel'] _A = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel'] else: _A = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel'] _A = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _A = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _A = flax_model.params['encoder']['block'][str(_snake_case )]['layer'] _A = tax_attention_key _A = tax_attention_out _A = tax_attention_query _A = tax_attention_value _A = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _A = tax_global_layer_norm if split_mlp_wi: _A = tax_mlp_wi_a _A = tax_mlp_wi_a else: _A = tax_mlp_wi _A = tax_mlp_wo _A = tax_mlp_layer_norm _A = flax_model_encoder_layer_block # Only for layer 0: _A = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T _A = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _A = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T _A = tax_encoder_global_rel_embedding # Assigning _A = tax_model['target']['encoder']['encoder_norm']['scale'] _A = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): _A = F'''layers_{str(_snake_case )}''' # Self-Attention _A = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel'] _A = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel'] _A = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel'] _A = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel'] # 
Layer Normalization _A = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][ 'scale' ] # Encoder-Decoder-Attention _A = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention'] _A = tax_enc_dec_attention_module['key']['kernel'] _A = tax_enc_dec_attention_module['out']['kernel'] _A = tax_enc_dec_attention_module['query']['kernel'] _A = tax_enc_dec_attention_module['value']['kernel'] # Layer Normalization _A = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale'] # MLP if split_mlp_wi: _A = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel'] _A = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel'] else: _A = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel'] _A = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _A = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _A = flax_model.params['decoder']['block'][str(_snake_case )]['layer'] _A = tax_attention_key _A = tax_attention_out _A = tax_attention_query _A = tax_attention_value _A = tax_pre_attention_layer_norm _A = tax_enc_dec_attention_key _A = tax_enc_dec_attention_out _A = tax_enc_dec_attention_query _A = tax_enc_dec_attention_value _A = tax_cross_layer_norm if split_mlp_wi: _A = tax_mlp_wi_a _A = tax_mlp_wi_a else: _A = tax_mlp_wi _A = tax_mlp_wo _A = tax_mlp_layer_norm _A = flax_model_decoder_layer_block # Decoder Normalization _A = tax_model['target']['decoder']['decoder_norm']['scale'] _A = tax_decoder_norm # Only for layer 0: _A = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T _A = tax_decoder_rel_embedding # Token Embeddings _A = tax_model['target']['token_embedder']['embedding'] _A = tax_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: _A = tax_model['target']['decoder']['logits_dense']['kernel'] flax_model.save_pretrained(_snake_case ) print('T5X Model was successfully converted!' ) if __name__ == "__main__": a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''') parser.add_argument( '''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.''' ) a = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
315
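Assuming the conversion script above ran to completion, a hedged follow-up sketch that reloads the dumped folder through the public transformers Flax API to confirm the weights deserialize; the path is a placeholder for whatever --flax_dump_folder_path pointed at:

from transformers import FlaxAutoModelForSeq2SeqLM

# Placeholder path: the --flax_dump_folder_path output directory.
model = FlaxAutoModelForSeq2SeqLM.from_pretrained("/path/to/flax_dump_folder")
print(model.config.model_type)  # expected: "t5" or "longt5"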
"""simple docstring""" import numpy class lowercase_ : '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : numpy.ndarray ): _A = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. _A = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. _A = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. _A = numpy.random.rand(3 , 1 ) # Real output values provided. _A = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. _A = numpy.zeros(output_array.shape ) def lowerCAmelCase_ ( self : List[str] ): _A = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def lowerCAmelCase_ ( self : Optional[int] ): _A = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) _A = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) _A = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : bool ): for iteration in range(1 , iterations + 1 ): _A = self.feedforward() self.back_propagation() if give_loss: _A = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F'''Iteration {iteration} Loss: {loss}''' ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : numpy.ndarray ): _A = 
input_arr _A = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return 1 / (1 + numpy.exp(-value )) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return (value) * (1 - (value)) def _snake_case ( ) -> int: '''simple docstring''' _A = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. _A = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. _A = TwoHiddenLayerNeuralNetwork( input_array=_snake_case , output_array=_snake_case ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=_snake_case , iterations=10 , give_loss=_snake_case ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
315
1
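The backpropagation above relies on the identity sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)), with the derivative helper taking the already-activated value rather than x. A finite-difference check of the two helpers as the record defines them:

import numpy as np

def sigmoid(value):
    return 1 / (1 + np.exp(-value))

def sigmoid_derivative(value):
    # Takes sigmoid(x), not x, exactly as the network code uses it.
    return value * (1 - value)

x = np.linspace(-4, 4, 9)
eps = 1e-6
numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)
assert np.allclose(numeric, sigmoid_derivative(sigmoid(x)), atol=1e-6)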
"""simple docstring""" import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a = logging.get_logger(__name__) a = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all BART models at https://huggingface.co/models?filter=bart a = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, } a = { '''facebook/bart-base''': 1_024, '''facebook/bart-large''': 1_024, '''facebook/bart-large-mnli''': 1_024, '''facebook/bart-large-cnn''': 1_024, '''facebook/bart-large-xsum''': 1_024, '''yjernite/bart_eli5''': 1_024, } @lru_cache() def _snake_case ( ) -> Optional[Any]: '''simple docstring''' _A = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) _A = bs[:] _A = 0 for b in range(2**8 ): if b not in bs: bs.append(_snake_case ) cs.append(2**8 + n ) n += 1 _A = [chr(_snake_case ) for n in cs] return dict(zip(_snake_case , _snake_case ) ) def _snake_case ( _snake_case : Tuple ) -> Tuple: '''simple docstring''' _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A = char return pairs class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES UpperCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : Optional[Any] = ['''input_ids''', '''attention_mask'''] def __init__( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict="replace" , _UpperCAmelCase : int="<s>" , _UpperCAmelCase : str="</s>" , _UpperCAmelCase : Union[str, Any]="</s>" , _UpperCAmelCase : Optional[Any]="<s>" , _UpperCAmelCase : Union[str, Any]="<unk>" , _UpperCAmelCase : Tuple="<pad>" , _UpperCAmelCase : Dict="<mask>" , _UpperCAmelCase : Tuple=False , **_UpperCAmelCase : Optional[Any] , ): _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token super().__init__( errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , ) with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle: _A = json.load(_UpperCAmelCase ) _A = {v: k for k, v in self.encoder.items()} _A = errors # how to handle errors in decoding _A = bytes_to_unicode() _A = {v: k for k, v in self.byte_encoder.items()} with open(_UpperCAmelCase , encoding='utf-8' ) as merges_handle: _A = merges_handle.read().split('\n' )[1:-1] _A = [tuple(merge.split() ) for merge in bpe_merges] _A = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) _A = {} _A = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _A = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def lowerCAmelCase_ ( self : Dict ): return len(self.encoder ) def lowerCAmelCase_ ( self : Optional[int] ): return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : List[Any] ): if token in self.cache: return self.cache[token] _A = tuple(_UpperCAmelCase ) _A = get_pairs(_UpperCAmelCase ) if not pairs: return token while True: _A = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float('inf' ) ) ) if bigram not in self.bpe_ranks: break _A , _A = bigram _A = [] _A = 0 while i < len(_UpperCAmelCase ): try: _A = word.index(_UpperCAmelCase , _UpperCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _A = j if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(_UpperCAmelCase ) _A = new_word if len(_UpperCAmelCase ) == 1: break else: _A = get_pairs(_UpperCAmelCase ) _A = ' '.join(_UpperCAmelCase ) _A = word return word def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : List[str] ): _A = [] for token in re.findall(self.pat , _UpperCAmelCase ): _A = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCAmelCase ).split(' ' ) ) return bpe_tokens def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : List[str] ): return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Any ): return self.decoder.get(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Dict ): _A = ''.join(_UpperCAmelCase ) _A = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): if not os.path.isdir(_UpperCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) _A = os.path.join( 
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + '\n' ) _A = 0 with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) _A = token_index writer.write(' '.join(_UpperCAmelCase ) + '\n' ) index += 1 return vocab_file, merge_file def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _A = [self.cls_token_id] _A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCAmelCase )) + [1] return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1] def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int=False , **_UpperCAmelCase : Optional[Any] ): _A = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_UpperCAmelCase ) > 0 and not text[0].isspace()): _A = ' ' + text return (text, kwargs)
315
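Two small checks of the byte-level BPE helpers the tokenizer above depends on, restated with the mangled names spelled out: the byte-to-unicode table must cover all 256 byte values, and get_pairs enumerates adjacent symbol pairs.

def bytes_to_unicode():
    # 256-entry byte -> printable-unicode map, as in the GPT-2/BART tokenizers.
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("\xa1"), ord("\xac") + 1))
        + list(range(ord("\xae"), ord("\xff") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, (chr(c) for c in cs)))

def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

assert len(bytes_to_unicode()) == 256
assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}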
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar a = TypeVar('''T''') class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : T ): _A = data _A = None def __str__( self : str ): return F'''{self.data}''' class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Tuple ): _A = None def __iter__( self : List[Any] ): _A = self.top while node: yield node.data _A = node.next def __str__( self : Union[str, Any] ): return "->".join([str(_UpperCAmelCase ) for item in self] ) def __len__( self : List[Any] ): return len(tuple(iter(self ) ) ) def lowerCAmelCase_ ( self : str ): return self.top is None def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : T ): _A = Node(_UpperCAmelCase ) if not self.is_empty(): _A = self.top _A = node def lowerCAmelCase_ ( self : Dict ): if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , _UpperCAmelCase ) _A = self.top _A = self.top.next return pop_node.data def lowerCAmelCase_ ( self : Tuple ): if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def lowerCAmelCase_ ( self : Optional[Any] ): _A = None if __name__ == "__main__": from doctest import testmod testmod()
315
1
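A short behavioural sketch of the LIFO contract the stack above implements, written with a plain list so it runs independently of the normalized method names in the dump:

stack: list[int] = []
for value in (1, 2, 3):
    stack.append(value)          # push
assert stack[-1] == 3            # peek
assert stack.pop() == 3          # pop
# __str__ above joins from the top down: the most recently pushed item prints first.
assert "->".join(str(v) for v in reversed(stack)) == "2->1"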
"""simple docstring""" import numpy as np import datasets a = ''' Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] ''' a = '''\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } ''' a = ''' Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric("mahalanobis") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {\'mahalanobis\': array([0.5])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self : Dict ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'X': datasets.Sequence(datasets.Value('float' , id='sequence' ) , id='X' ), } ) , ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ): # convert to numpy arrays _A = np.array(_UpperCAmelCase ) _A = np.array(_UpperCAmelCase ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError('Expected `X` to be a 2D vector' ) if len(reference_distribution.shape ) != 2: raise ValueError('Expected `reference_distribution` to be a 2D vector' ) if reference_distribution.shape[0] < 2: raise ValueError( 'Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension' ) # Get mahalanobis distance for each prediction _A = X - np.mean(_UpperCAmelCase ) _A = np.cov(reference_distribution.T ) try: _A = np.linalg.inv(_UpperCAmelCase ) except np.linalg.LinAlgError: _A = np.linalg.pinv(_UpperCAmelCase ) _A = np.dot(_UpperCAmelCase , _UpperCAmelCase ) _A = np.dot(_UpperCAmelCase , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
315
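A standalone numpy sketch reproducing the docstring example above (reference [[0, 1], [1, 0]] and X = [[0, 1]] give 0.5). The covariance here is singular, which is exactly the case the metric's pseudo-inverse fallback handles; for this symmetric example the per-feature mean coincides with the scalar mean.

import numpy as np

X = np.array([[0.0, 1.0]])
reference = np.array([[0.0, 1.0], [1.0, 0.0]])

x_minus_mu = X - reference.mean(axis=0)   # [[-0.5, 0.5]]
cov = np.cov(reference.T)                 # singular 2x2 matrix
inv_cov = np.linalg.pinv(cov)             # pseudo-inverse fallback
mahal = np.dot(np.dot(x_minus_mu, inv_cov), x_minus_mu.T).diagonal()
assert np.allclose(mahal, [0.5])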
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Any , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ): warnings.warn( 'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ImageGPTImageProcessor instead.' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
315
1
"""simple docstring""" import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) a = logging.getLogger(__name__) a = tf.data.AUTOTUNE def _snake_case ( ) -> List[str]: '''simple docstring''' _A = argparse.ArgumentParser(description='Train a masked language model on TPU.' ) parser.add_argument( '--pretrained_model_config' , type=_snake_case , default='roberta-base' , help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!' , ) parser.add_argument( '--tokenizer' , type=_snake_case , default='unigram-tokenizer-wikitext' , help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.' , ) parser.add_argument( '--per_replica_batch_size' , type=_snake_case , default=8 , help='Batch size per TPU core.' , ) parser.add_argument( '--no_tpu' , action='store_true' , help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.' , ) parser.add_argument( '--tpu_name' , type=_snake_case , help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.' , default='local' , ) parser.add_argument( '--tpu_zone' , type=_snake_case , help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.' , ) parser.add_argument( '--gcp_project' , type=_snake_case , help='Google cloud project name. Only used for non-Colab TPU nodes.' ) parser.add_argument( '--bfloat16' , action='store_true' , help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.' , ) parser.add_argument( '--train_dataset' , type=_snake_case , help='Path to training dataset to load. If the path begins with `gs://`' ' then the dataset will be loaded from a Google Cloud Storage bucket.' , ) parser.add_argument( '--shuffle_buffer_size' , type=_snake_case , default=2**18 , help='Size of the shuffle buffer (in samples)' , ) parser.add_argument( '--eval_dataset' , type=_snake_case , help='Path to evaluation dataset to load. If the path begins with `gs://`' ' then the dataset will be loaded from a Google Cloud Storage bucket.' , ) parser.add_argument( '--num_epochs' , type=_snake_case , default=1 , help='Number of epochs to train for.' , ) parser.add_argument( '--learning_rate' , type=_snake_case , default=1E-4 , help='Learning rate to use for training.' , ) parser.add_argument( '--weight_decay_rate' , type=_snake_case , default=1E-3 , help='Weight decay rate to use for training.' , ) parser.add_argument( '--max_length' , type=_snake_case , default=5_12 , help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py' , ) parser.add_argument( '--mlm_probability' , type=_snake_case , default=0.15 , help='Fraction of tokens to mask during training.' , ) parser.add_argument('--output_dir' , type=_snake_case , required=_snake_case , help='Path to save model checkpoints to.' ) parser.add_argument('--hub_model_id' , type=_snake_case , help='Model ID to upload to on the Hugging Face Hub.' 
) _A = parser.parse_args() return args def _snake_case ( _snake_case : int ) -> Union[str, Any]: '''simple docstring''' try: if args.tpu_name: _A = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: _A = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( 'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or ' '--gcp_project. When running on a TPU VM, use --tpu_name local.' ) tf.config.experimental_connect_to_cluster(_snake_case ) tf.tpu.experimental.initialize_tpu_system(_snake_case ) return tpu def _snake_case ( _snake_case : Dict ) -> List[Any]: '''simple docstring''' _A = 0 for file in file_list: _A = file.split('/' )[-1] _A = re.search(R'-\d+-(\d+)\.tfrecord' , _snake_case ).group(1 ) _A = int(_snake_case ) num_samples += sample_count return num_samples def _snake_case ( _snake_case : List[Any] , _snake_case : Tuple , _snake_case : List[str] , _snake_case : str , _snake_case : int , _snake_case : int=None ) -> int: '''simple docstring''' _A = count_samples(_snake_case ) _A = tf.data.Dataset.from_tensor_slices(_snake_case ) if shuffle: _A = dataset.shuffle(len(_snake_case ) ) _A = tf.data.TFRecordDataset(_snake_case , num_parallel_reads=_snake_case ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here _A = dataset.apply(tf.data.experimental.assert_cardinality(_snake_case ) ) _A = dataset.map(_snake_case , num_parallel_calls=_snake_case ) if shuffle: assert shuffle_buffer_size is not None _A = dataset.shuffle(args.shuffle_buffer_size ) _A = dataset.batch(_snake_case , drop_remainder=_snake_case ) _A = dataset.map(_snake_case , num_parallel_calls=_snake_case ) _A = dataset.prefetch(_snake_case ) return dataset def _snake_case ( _snake_case : Optional[int] ) -> str: '''simple docstring''' if not args.no_tpu: _A = initialize_tpu(_snake_case ) _A = tf.distribute.TPUStrategy(_snake_case ) else: _A = tf.distribute.OneDeviceStrategy(device='/gpu:0' ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy('mixed_bfloat16' ) _A = AutoTokenizer.from_pretrained(args.tokenizer ) _A = AutoConfig.from_pretrained(args.pretrained_model_config ) _A = tokenizer.vocab_size _A = tf.io.gfile.glob(os.path.join(args.train_dataset , '*.tfrecord' ) ) if not training_records: raise ValueError(F'''No .tfrecord files found in {args.train_dataset}.''' ) _A = tf.io.gfile.glob(os.path.join(args.eval_dataset , '*.tfrecord' ) ) if not eval_records: raise ValueError(F'''No .tfrecord files found in {args.eval_dataset}.''' ) _A = count_samples(_snake_case ) _A = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) _A = steps_per_epoch * args.num_epochs with strategy.scope(): _A = TFAutoModelForMaskedLM.from_config(_snake_case ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built _A , _A = create_optimizer( num_train_steps=_snake_case , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). 
model.compile(optimizer=_snake_case , metrics=['accuracy'] ) def decode_fn(_snake_case : Optional[int] ): _A = { 'input_ids': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), 'attention_mask': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(_snake_case , _snake_case ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. _A = DataCollatorForLanguageModeling( tokenizer=_snake_case , mlm_probability=args.mlm_probability , mlm=_snake_case , return_tensors='tf' ) def mask_with_collator(_snake_case : Tuple ): # TF really needs an isin() function _A = ( ~tf.cast(batch['attention_mask'] , tf.bool ) | (batch['input_ids'] == tokenizer.cls_token_id) | (batch['input_ids'] == tokenizer.sep_token_id) ) _A , _A = data_collator.tf_mask_tokens( batch['input_ids'] , vocab_size=len(_snake_case ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_snake_case , ) return batch _A = args.per_replica_batch_size * strategy.num_replicas_in_sync _A = prepare_dataset( _snake_case , decode_fn=_snake_case , mask_fn=_snake_case , batch_size=_snake_case , shuffle=_snake_case , shuffle_buffer_size=args.shuffle_buffer_size , ) _A = prepare_dataset( _snake_case , decode_fn=_snake_case , mask_fn=_snake_case , batch_size=_snake_case , shuffle=_snake_case , ) _A = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_snake_case ) ) model.fit( _snake_case , validation_data=_snake_case , epochs=args.num_epochs , callbacks=_snake_case , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": a = parse_args() main(args)
315
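count_samples in the training script above assumes shard filenames that end in '-<shard>-<num_samples>.tfrecord'; a tiny sketch of that parsing convention (the bucket and filenames are made up):

import re

def count_samples(file_list):
    # Sum the per-shard sample counts encoded in the filenames.
    total = 0
    for file in file_list:
        name = file.split("/")[-1]
        total += int(re.search(r"-\d+-(\d+)\.tfrecord", name).group(1))
    return total

shards = ["gs://bucket/wiki-00000-16384.tfrecord", "gs://bucket/wiki-00001-12288.tfrecord"]
assert count_samples(shards) == 16384 + 12288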
"""simple docstring""" from __future__ import annotations import collections import pprint from pathlib import Path def _snake_case ( _snake_case : str ) -> str: '''simple docstring''' return "".join(sorted(_snake_case ) ) def _snake_case ( _snake_case : str ) -> list[str]: '''simple docstring''' return word_by_signature[signature(_snake_case )] a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''') a = sorted({word.strip().lower() for word in data.splitlines()}) a = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open('''anagrams.txt''', '''w''') as file: file.write('''all_anagrams = \n ''') file.write(pprint.pformat(all_anagrams))
315
1
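A self-contained sketch of the signature-grouping idea above on a toy word list (the words are arbitrary stand-ins for words.txt):

import collections

def signature(word: str) -> str:
    return "".join(sorted(word))

words = ["listen", "silent", "enlist", "google", "banana"]
by_signature = collections.defaultdict(list)
for word in words:
    by_signature[signature(word)].append(word)

anagram_groups = {sig: group for sig, group in by_signature.items() if len(group) > 1}
assert anagram_groups == {"eilnst": ["listen", "silent", "enlist"]}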
"""simple docstring""" import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params a = getLogger(__name__) a = '''cuda''' if torch.cuda.is_available() else '''cpu''' def _snake_case ( _snake_case : List[str] , _snake_case : str , _snake_case : str , _snake_case : int = 8 , _snake_case : str = DEFAULT_DEVICE , _snake_case : int=False , _snake_case : List[str]="summarization" , _snake_case : List[str]=None , **_snake_case : str , ) -> Dict: '''simple docstring''' _A = Path(_snake_case ).open('w' , encoding='utf-8' ) _A = str(_snake_case ) _A = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ).to(_snake_case ) if fpaa: _A = model.half() _A = AutoTokenizer.from_pretrained(_snake_case ) logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. _A = time.time() # update config with task specific params use_task_specific_params(_snake_case , _snake_case ) if prefix is None: _A = prefix or getattr(model.config , 'prefix' , '' ) or '' for examples_chunk in tqdm(list(chunks(_snake_case , _snake_case ) ) ): _A = [prefix + text for text in examples_chunk] _A = tokenizer(_snake_case , return_tensors='pt' , truncation=_snake_case , padding='longest' ).to(_snake_case ) _A = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_snake_case , ) _A = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case ) for hypothesis in dec: fout.write(hypothesis + '\n' ) fout.flush() fout.close() _A = int(time.time() - start_time ) # seconds _A = len(_snake_case ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def _snake_case ( ) -> List[Any]: '''simple docstring''' return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' ) def _snake_case ( _snake_case : List[Any]=True ) -> str: '''simple docstring''' _A = argparse.ArgumentParser() parser.add_argument('model_name' , type=_snake_case , help='like facebook/bart-large-cnn,t5-base, etc.' ) parser.add_argument('input_path' , type=_snake_case , help='like cnn_dm/test.source' ) parser.add_argument('save_path' , type=_snake_case , help='where to save summaries' ) parser.add_argument('--reference_path' , type=_snake_case , required=_snake_case , help='like cnn_dm/test.target' ) parser.add_argument('--score_path' , type=_snake_case , required=_snake_case , default='metrics.json' , help='where to save metrics' ) parser.add_argument('--device' , type=_snake_case , required=_snake_case , default=_snake_case , help='cuda, cuda:1, cpu etc.' ) parser.add_argument( '--prefix' , type=_snake_case , required=_snake_case , default=_snake_case , help='will be added to the begininng of src examples' ) parser.add_argument('--task' , type=_snake_case , default='summarization' , help='used for task_specific_params + metrics' ) parser.add_argument('--bs' , type=_snake_case , default=8 , required=_snake_case , help='batch size' ) parser.add_argument( '--n_obs' , type=_snake_case , default=-1 , required=_snake_case , help='How many observations. Defaults to all.' 
) parser.add_argument('--fp16' , action='store_true' ) parser.add_argument('--dump-args' , action='store_true' , help='print the custom hparams with the results' ) parser.add_argument( '--info' , nargs='?' , type=_snake_case , const=datetime_now() , help=( 'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.' ' lang=en-ru. If no value is passed, the current datetime string will be used.' ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate _A , _A = parser.parse_known_args() _A = parse_numeric_n_bool_cl_kwargs(_snake_case ) if parsed_args and verbose: print(F'''parsed the following generate kwargs: {parsed_args}''' ) _A = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: _A = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=_snake_case ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(F'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError('Can\'t mix --fp16 and --device cpu' ) _A = generate_summaries_or_translations( _snake_case , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_snake_case , ) if args.reference_path is None: return {} # Compute scores _A = calculate_bleu if 'translation' in args.task else calculate_rouge _A = [x.rstrip() for x in open(args.save_path ).readlines()] _A = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_snake_case )] _A = score_fn(_snake_case , _snake_case ) scores.update(_snake_case ) if args.dump_args: scores.update(_snake_case ) if args.info: _A = args.info if verbose: print(_snake_case ) if args.score_path is not None: json.dump(_snake_case , open(args.score_path , 'w' ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
315
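run_eval.py above imports a chunks helper from its sibling utils module; the sketch below is an assumption about what that local helper does (simple fixed-size batching in input order), not part of the transformers API:

from typing import Iterator, List, TypeVar

T = TypeVar("T")

def chunks(items: List[T], size: int) -> Iterator[List[T]]:
    # Yield consecutive batches of at most `size` items, preserving order.
    for i in range(0, len(items), size):
        yield items[i : i + size]

assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]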
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version a = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : Optional[str] = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The column name of the images in the files.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the training data.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the validation data.'''} ) UpperCAmelCase : Optional[float] = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase_ ( self : Dict ): _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : str = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. 
Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) UpperCAmelCase : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCAmelCase : str = field(default=__lowerCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) UpperCAmelCase : float = field( default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : float = field( default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} ) def _snake_case ( _snake_case : int ) -> Optional[int]: '''simple docstring''' _A = torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ) -> List[str]: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , _snake_case , _snake_case ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(_snake_case ) transformers.utils.logging.set_verbosity(_snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. 
''' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0: _A = ds['train'].train_test_split(data_args.train_val_split ) _A = split['train'] _A = split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: _A = ViTMAEConfig.from_pretrained(model_args.config_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _A = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTImageProcessor() # create model if model_args.model_name_or_path: _A = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) _A = ViTMAEForPreTraining(_snake_case ) if training_args.do_train: _A = ds['train'].column_names else: _A = ds['validation'].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = 'image' elif "img" in column_names: _A = 'img' else: _A = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _A = image_processor.size['shortest_edge'] else: _A = (image_processor.size['height'], image_processor.size['width']) _A = Compose( [ Lambda(lambda _snake_case : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(_snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(_snake_case : List[Any] ): _A = 
[transforms(_snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: _A = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: _A = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_snake_case ) # Compute absolute learning rate _A = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _A = training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer _A = Trainer( model=_snake_case , args=_snake_case , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=_snake_case ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics('eval' , _snake_case ) trainer.save_metrics('eval' , _snake_case ) # Write model card and (optionally) push to hub _A = { 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**_snake_case ) else: trainer.create_model_card(**_snake_case ) def _snake_case ( _snake_case : List[str] ) -> Optional[Any]: '''simple docstring''' main() if __name__ == "__main__": main()
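# --- Editor's sketch of the absolute-LR rule used above ----------------------
# The trainer scales the base learning rate by total batch size / 256, exactly
# as computed in main(); the numbers below are illustrative, not from a real run.
base_learning_rate = 1e-3
per_device_batch_size = 64
gradient_accumulation_steps = 2
world_size = 4

total_train_batch_size = per_device_batch_size * gradient_accumulation_steps * world_size  # 512
absolute_lr = base_learning_rate * total_train_batch_size / 256                            # 2e-3
print(absolute_lr)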
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class lowercase_ : '''simple docstring''' def __init__( self : int ): _A = {} def lowerCAmelCase_ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any]=1 ): if self.graph.get(_UpperCAmelCase ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: _A = [[w, v]] if not self.graph.get(_UpperCAmelCase ): _A = [] def lowerCAmelCase_ ( self : Any ): return list(self.graph ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ): if self.graph.get(_UpperCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Dict=-2 , _UpperCAmelCase : Optional[int]=-1 ): if s == d: return [] _A = [] _A = [] if s == -2: _A = list(self.graph )[0] stack.append(_UpperCAmelCase ) visited.append(_UpperCAmelCase ) _A = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _A = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(_UpperCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _A = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(_UpperCAmelCase ) != 0: _A = stack[len(_UpperCAmelCase ) - 1] else: _A = ss # check if se have reached the starting point if len(_UpperCAmelCase ) == 0: return visited def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Tuple=-1 ): if c == -1: _A = floor(random() * 10_000 ) + 10 for i in range(_UpperCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): _A = floor(random() * c ) + 1 if n != i: self.add_pair(_UpperCAmelCase , _UpperCAmelCase , 1 ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Dict=-2 ): _A = deque() _A = [] if s == -2: _A = list(self.graph )[0] d.append(_UpperCAmelCase ) visited.append(_UpperCAmelCase ) while d: _A = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[int] ): _A = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Union[str, Any] ): return len(self.graph[u] ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : int=-2 ): _A = [] _A = [] if s == -2: _A = list(self.graph )[0] stack.append(_UpperCAmelCase ) visited.append(_UpperCAmelCase ) _A = s _A = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _A = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _A = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(_UpperCAmelCase ) != 0: _A = stack[len(_UpperCAmelCase ) - 1] else: _A = ss # check if se have reached the starting point if len(_UpperCAmelCase ) == 0: return sorted_nodes def lowerCAmelCase_ ( self : str ): _A = [] _A = [] _A = list(self.graph )[0] stack.append(_UpperCAmelCase ) visited.append(_UpperCAmelCase ) _A = -2 _A = [] _A = s _A = False _A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and 
indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _A = len(_UpperCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _A = node[1] break # check if all the children are visited if s == ss: stack.pop() _A = True if len(_UpperCAmelCase ) != 0: _A = stack[len(_UpperCAmelCase ) - 1] else: _A = False indirect_parents.append(_UpperCAmelCase ) _A = s _A = ss # check if se have reached the starting point if len(_UpperCAmelCase ) == 0: return list(_UpperCAmelCase ) def lowerCAmelCase_ ( self : str ): _A = [] _A = [] _A = list(self.graph )[0] stack.append(_UpperCAmelCase ) visited.append(_UpperCAmelCase ) _A = -2 _A = [] _A = s _A = False _A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _A = len(_UpperCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _A = node[1] break # check if all the children are visited if s == ss: stack.pop() _A = True if len(_UpperCAmelCase ) != 0: _A = stack[len(_UpperCAmelCase ) - 1] else: _A = False indirect_parents.append(_UpperCAmelCase ) _A = s _A = ss # check if se have reached the starting point if len(_UpperCAmelCase ) == 0: return False def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : str=-2 , _UpperCAmelCase : Union[str, Any]=-1 ): _A = time() self.dfs(_UpperCAmelCase , _UpperCAmelCase ) _A = time() return end - begin def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : List[Any]=-2 ): _A = time() self.bfs(_UpperCAmelCase ) _A = time() return end - begin class lowercase_ : '''simple docstring''' def __init__( self : str ): _A = {} def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=1 ): # check if the u exists if self.graph.get(_UpperCAmelCase ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist _A = [[w, v]] # add the other way if self.graph.get(_UpperCAmelCase ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist _A = [[w, u]] def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ): if self.graph.get(_UpperCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(_UpperCAmelCase ) # the other way round if self.graph.get(_UpperCAmelCase ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Optional[int]=-2 , _UpperCAmelCase : str=-1 ): if s == d: return [] _A = [] _A = [] if s == -2: _A = list(self.graph )[0] stack.append(_UpperCAmelCase ) visited.append(_UpperCAmelCase ) _A = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _A = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(_UpperCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _A = node[1] break # check if all the 
children are visited if s == ss: stack.pop() if len(_UpperCAmelCase ) != 0: _A = stack[len(_UpperCAmelCase ) - 1] else: _A = ss # check if se have reached the starting point if len(_UpperCAmelCase ) == 0: return visited def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : str=-1 ): if c == -1: _A = floor(random() * 10_000 ) + 10 for i in range(_UpperCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): _A = floor(random() * c ) + 1 if n != i: self.add_pair(_UpperCAmelCase , _UpperCAmelCase , 1 ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : List[str]=-2 ): _A = deque() _A = [] if s == -2: _A = list(self.graph )[0] d.append(_UpperCAmelCase ) visited.append(_UpperCAmelCase ) while d: _A = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ): return len(self.graph[u] ) def lowerCAmelCase_ ( self : Any ): _A = [] _A = [] _A = list(self.graph )[0] stack.append(_UpperCAmelCase ) visited.append(_UpperCAmelCase ) _A = -2 _A = [] _A = s _A = False _A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _A = len(_UpperCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _A = node[1] break # check if all the children are visited if s == ss: stack.pop() _A = True if len(_UpperCAmelCase ) != 0: _A = stack[len(_UpperCAmelCase ) - 1] else: _A = False indirect_parents.append(_UpperCAmelCase ) _A = s _A = ss # check if se have reached the starting point if len(_UpperCAmelCase ) == 0: return list(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _A = [] _A = [] _A = list(self.graph )[0] stack.append(_UpperCAmelCase ) visited.append(_UpperCAmelCase ) _A = -2 _A = [] _A = s _A = False _A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _A = len(_UpperCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _A = node[1] break # check if all the children are visited if s == ss: stack.pop() _A = True if len(_UpperCAmelCase ) != 0: _A = stack[len(_UpperCAmelCase ) - 1] else: _A = False indirect_parents.append(_UpperCAmelCase ) _A = s _A = ss # check if se have reached the starting point if len(_UpperCAmelCase ) == 0: return False def lowerCAmelCase_ ( self : Optional[Any] ): return list(self.graph ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Optional[Any]=-2 , _UpperCAmelCase : int=-1 ): _A = time() self.dfs(_UpperCAmelCase , _UpperCAmelCase ) _A = time() return end - begin def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : str=-2 ): _A = time() self.bfs(_UpperCAmelCase ) _A = time() return end - begin
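# --- Editor's usage sketch for the graph classes above -----------------------
# The obfuscation collapsed both class names to `lowercase_`, every method to
# `lowerCAmelCase_`, and the `self.graph = {}` initialization to `_A = {}`, so
# the sketch below uses assumed upstream names (DirectedGraph with
# add_pair / dfs / bfs / cycle_nodes) rather than the names as written.
g = DirectedGraph()
g.add_pair(1, 2)
g.add_pair(2, 3)
g.add_pair(3, 1)           # closes the cycle 1 -> 2 -> 3 -> 1
print(g.dfs(1, 3))         # depth-first path from 1 to 3, e.g. [1, 2, 3]
print(g.bfs(1))            # breadth-first visit order from vertex 1
print(g.cycle_nodes())     # vertices that sit on a cycle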
"""simple docstring""" import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor a = logging.getLogger(__name__) a = 50 # max width of layer names a = 70 # max width of quantizer names def _snake_case ( _snake_case : int ) -> List[Any]: '''simple docstring''' _A = parser.add_argument_group('quant_trainer arguments' ) group.add_argument('--wprec' , type=_snake_case , default=8 , help='weight precision' ) group.add_argument('--aprec' , type=_snake_case , default=8 , help='activation precision' ) group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' ) group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' ) group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' ) group.add_argument('--quant-disable-keyword' , type=_snake_case , nargs='+' , help='disable quantizers by keyword' ) group.add_argument('--quant-disable-layer-module' , type=_snake_case , help='disable quantizers by keyword under layer.' ) group.add_argument('--quant-enable-layer-module' , type=_snake_case , help='enable quantizers by keyword under layer' ) group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' ) group.add_argument('--percentile' , default=_snake_case , type=_snake_case , help='percentile for PercentileCalibrator' ) group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' ) group.add_argument('--clip-gelu' , metavar='N' , type=_snake_case , help='clip gelu output maximum value to N' ) group.add_argument( '--recalibrate-weights' , action='store_true' , help=( 'recalibrate weight amaxes by taking the max of the weights.' ' amaxes will be computed with the current quantization granularity (axis).' ) , ) def _snake_case ( _snake_case : Dict ) -> Optional[Any]: '''simple docstring''' if args.calibrator == "max": _A = 'max' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('Specify --percentile when using percentile calibrator' ) _A = 'histogram' elif args.calibrator == "mse": _A = 'histogram' else: raise ValueError(F'''Invalid calibrator {args.calibrator}''' ) _A = QuantDescriptor(num_bits=args.aprec , calib_method=_snake_case ) _A = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(_snake_case ) quant_nn.QuantLinear.set_default_quant_desc_weight(_snake_case ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Any=False , _snake_case : Union[str, Any]=False ) -> Optional[int]: '''simple docstring''' logger.info('Configuring Model for Quantization' ) logger.info(F'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(_snake_case , ['embeddings'] , which='weight' , _disabled=_snake_case ) if args.quant_disable: set_quantizer_by_name(_snake_case , [''] , _disabled=_snake_case ) if args.quant_disable_keyword: set_quantizer_by_name(_snake_case , args.quant_disable_keyword , _disabled=_snake_case ) if args.quant_disable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' 
+ args.quant_disable_layer_module] , _disabled=_snake_case ) if args.quant_enable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_snake_case ) if args.recalibrate_weights: recalibrate_weights(_snake_case ) if args.fuse_qkv: fuse_qkv(_snake_case , _snake_case ) if args.clip_gelu: clip_gelu(_snake_case , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str ) -> Any: '''simple docstring''' logger.info('Enabling Calibration' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(F'''{name:80}: {module}''' ) def _snake_case ( _snake_case : List[Any] , _snake_case : List[Any] ) -> str: '''simple docstring''' logger.info('Loading calibrated amax' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('percentile' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str , _snake_case : int ) -> str: '''simple docstring''' def fusea(_snake_case : int , _snake_case : str , _snake_case : Optional[Any] ): for mod in [qq, qk, qv]: if not hasattr(_snake_case , '_amax' ): print(' WARNING: NO AMAX BUFFER' ) return _A = qq._amax.detach().item() _A = qk._amax.detach().item() _A = qv._amax.detach().item() _A = max(_snake_case , _snake_case , _snake_case ) qq._amax.fill_(_snake_case ) qk._amax.fill_(_snake_case ) qv._amax.fill_(_snake_case ) logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('.attention.self' ): logger.info(F'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _snake_case ( _snake_case : int , _snake_case : str ) -> Union[str, Any]: '''simple docstring''' for name, mod in model.named_modules(): if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ): _A = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=_snake_case ) _A = mod._input_quantizer._amax.data.detach().item() logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def _snake_case ( _snake_case : List[str] ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None: _A = mod.weight.shape[0] _A = mod._weight_quantizer._amax.detach() _A = torch.ones(_snake_case , dtype=amax.dtype , device=amax.device ) * amax print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def _snake_case ( _snake_case : Dict ) -> Tuple: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ): if not hasattr(mod.weight_quantizer , '_amax' ): print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' ) continue # determine which axes to reduce across # e.g. 
a 4D tensor quantized per axis 0 should reduce over (1,2,3) _A = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _A = set(range(len(mod.weight.size() ) ) ) - axis_set _A = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_snake_case , keepdims=_snake_case ).detach() logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) _A = amax def _snake_case ( _snake_case : Tuple , _snake_case : List[str]=25 , _snake_case : str=1_80 , _snake_case : int=None ) -> List[Any]: '''simple docstring''' if ignore is None: _A = [] elif not isinstance(_snake_case , _snake_case ): _A = [ignore] _A = 0 for name, mod in model.named_modules(): if not hasattr(_snake_case , 'weight' ): continue _A = max(_snake_case , len(_snake_case ) ) for name, mod in model.named_modules(): _A = getattr(_snake_case , '_input_quantizer' , _snake_case ) _A = getattr(_snake_case , '_weight_quantizer' , _snake_case ) if not hasattr(_snake_case , 'weight' ): continue if type(_snake_case ) in ignore: continue if [True for s in ignore if type(_snake_case ) is str and s in name]: continue _A = F'''Act:{input_q.extra_repr()}''' _A = F'''Wgt:{weight_q.extra_repr()}''' _A = F'''{name:{name_width}} {act_str} {wgt_str}''' if len(_snake_case ) <= line_width: logger.info(_snake_case ) else: logger.info(F'''{name:{name_width}} {act_str}''' ) logger.info(F'''{" ":{name_width}} {wgt_str}''' ) def _snake_case ( _snake_case : Dict ) -> int: '''simple docstring''' _A = 0 for name, mod in model.named_modules(): if isinstance(_snake_case , pytorch_quantization.nn.TensorQuantizer ): print(F'''{name:80} {mod}''' ) count += 1 print(F'''{count} TensorQuantizers found in model''' ) def _snake_case ( _snake_case : str , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : Any ) -> int: '''simple docstring''' _A = getattr(_snake_case , _snake_case , _snake_case ) if quantizer_mod is not None: assert hasattr(_snake_case , _snake_case ) setattr(_snake_case , _snake_case , _snake_case ) else: logger.warning(F'''{name} has no {quantizer}''' ) def _snake_case ( _snake_case : Dict , _snake_case : Optional[int] , _snake_case : str="both" , **_snake_case : List[Any] ) -> str: '''simple docstring''' _A = F'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' if which in ["input", "both"]: set_quantizer(_snake_case , _snake_case , '_input_quantizer' , _snake_case , _snake_case ) if which in ["weight", "both"]: set_quantizer(_snake_case , _snake_case , '_weight_quantizer' , _snake_case , _snake_case ) logger.info(_snake_case ) def _snake_case ( _snake_case : Any , _snake_case : int , **_snake_case : Dict ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_input_quantizer' ) or hasattr(_snake_case , '_weight_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): set_quantizers(_snake_case , _snake_case , **_snake_case ) elif name.endswith('_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): _A = F'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' setattr(_snake_case , _snake_case , _snake_case ) logger.info(_snake_case )
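# --- Editor's sketch of the calibrate-then-quantize flow ----------------------
# Every top-level helper above was obfuscated to `_snake_case`; the names below
# are the assumed upstream ones (set_default_quantizers, configure_model,
# enable_calibration, finish_calibration), and `args`, `model`, and
# `calib_loader` are placeholders supplied by the training script.
set_default_quantizers(args)               # install QuantDescriptors on QuantLinear
configure_model(model, args, calib=True)   # apply the --quant-* options
enable_calibration(model)                  # quantizers collect range statistics
for batch in calib_loader:
    model(**batch)                         # forward passes feed the calibrators
finish_calibration(model, args)            # load amax values, re-enable quantization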
"""simple docstring""" import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) a = getLogger(__name__) def _snake_case ( _snake_case : List[Any] , _snake_case : str , _snake_case : str , _snake_case : int = 8 , _snake_case : int = 10_24 , _snake_case : int="val" , _snake_case : int=None , _snake_case : Tuple=False , _snake_case : Tuple="summarization" , _snake_case : Union[str, Any]=None , _snake_case : Optional[Any]=1 , _snake_case : Dict = None , _snake_case : str="" , **_snake_case : Union[str, Any] , ) -> Dict: '''simple docstring''' _A = str(_snake_case ) assert local_rank is not None torch.distributed.init_process_group(backend='nccl' , rank=_snake_case ) _A = Path(_snake_case ) _A = save_dir.joinpath(F'''rank_{local_rank}_output.json''' ) torch.cuda.set_device(_snake_case ) _A = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ).cuda() if fpaa: _A = model.half() # determine if we need to increase num_beams use_task_specific_params(_snake_case , _snake_case ) # update config with task specific params _A = generate_kwargs.pop('num_beams' , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: _A = num_return_sequences _A = AutoTokenizer.from_pretrained(_snake_case ) logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. if max_source_length is None: _A = tokenizer.model_max_length if prefix is None: _A = prefix or getattr(model.config , 'prefix' , '' ) or '' _A = SeqaSeqDataset( _snake_case , _snake_case , _snake_case , max_target_length=10_24 , type_path=_snake_case , n_obs=_snake_case , prefix=_snake_case , **_snake_case , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. _A = ds.make_sortish_sampler(_snake_case , distributed=_snake_case , add_extra_examples=_snake_case , shuffle=_snake_case ) _A = DataLoader(_snake_case , sampler=_snake_case , batch_size=_snake_case , collate_fn=ds.collate_fn ) _A = [] for batch in tqdm(_snake_case ): _A = model.generate( input_ids=batch['input_ids'].to(model.device ) , attention_mask=batch['attention_mask'].to(model.device ) , num_return_sequences=_snake_case , num_beams=_snake_case , **_snake_case , ) _A = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case ) _A = batch['ids'] if num_return_sequences > 1: _A = chunks(_snake_case , _snake_case ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(_snake_case ): results.append({'pred': pred, 'id': ids[i].item()} ) save_json(_snake_case , _snake_case ) return results, sampler.num_replicas def _snake_case ( ) -> Union[str, Any]: '''simple docstring''' _A = argparse.ArgumentParser( epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate' ) parser.add_argument('--data_dir' , type=_snake_case , help='like cnn_dm/test.source' ) parser.add_argument( '--model_name' , type=_snake_case , help='like facebook/bart-large-cnn,t5-base, etc.' 
, default='sshleifer/distilbart-xsum-12-3' , ) parser.add_argument('--save_dir' , type=_snake_case , help='where to save' , default='tmp_gen' ) parser.add_argument('--max_source_length' , type=_snake_case , default=_snake_case ) parser.add_argument( '--type_path' , type=_snake_case , default='test' , help='which subset to evaluate typically train/val/test' ) parser.add_argument('--task' , type=_snake_case , default='summarization' , help='used for task_specific_params + metrics' ) parser.add_argument('--bs' , type=_snake_case , default=8 , required=_snake_case , help='batch size' ) parser.add_argument( '--local_rank' , type=_snake_case , default=-1 , required=_snake_case , help='should be passed by distributed.launch' ) parser.add_argument( '--n_obs' , type=_snake_case , default=_snake_case , required=_snake_case , help='How many observations. Defaults to all.' ) parser.add_argument( '--num_return_sequences' , type=_snake_case , default=1 , required=_snake_case , help='How many sequences to return' ) parser.add_argument( '--sync_timeout' , type=_snake_case , default=6_00 , required=_snake_case , help='How long should master process wait for other processes to finish.' , ) parser.add_argument('--src_lang' , type=_snake_case , default=_snake_case , required=_snake_case ) parser.add_argument('--tgt_lang' , type=_snake_case , default=_snake_case , required=_snake_case ) parser.add_argument( '--prefix' , type=_snake_case , required=_snake_case , default=_snake_case , help='will be added to the begininng of src examples' ) parser.add_argument('--fp16' , action='store_true' ) parser.add_argument('--debug' , action='store_true' ) _A = time.time() _A , _A = parser.parse_known_args() _A = parse_numeric_n_bool_cl_kwargs(_snake_case ) if generate_kwargs and args.local_rank <= 0: print(F'''parsed the following generate kwargs: {generate_kwargs}''' ) _A = Path(args.save_dir + '_tmp' ) Path(_snake_case ).mkdir(exist_ok=_snake_case ) # this handles locking. _A = list(json_save_dir.glob('rank_*.json' ) ) if intermediate_files: raise ValueError(F'''Found files at {json_save_dir} please move or remove them.''' ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. 
_A = {} if args.src_lang is not None: _A = args.src_lang if args.tgt_lang is not None: _A = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=_snake_case ) _A , _A = eval_data_dir( args.data_dir , _snake_case , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=_snake_case , **_snake_case , ) if args.local_rank <= 0: _A = Path(args.save_dir ) save_dir.mkdir(exist_ok=_snake_case ) _A = gather_results_from_each_node(_snake_case , _snake_case , args.sync_timeout ) _A = combine_partial_results(_snake_case ) if args.num_return_sequences > 1: _A = save_dir.joinpath('pseudolabel_results.json' ) print(F'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' ) save_json(_snake_case , _snake_case ) return _A = Path(args.data_dir ).joinpath(args.type_path + '.target' ) with open(_snake_case ) as f: _A = [x.rstrip() for x in f.readlines()][: len(_snake_case )] # Calculate metrics, save metrics, and save _generations.txt _A = 'translation' in args.task _A = calculate_bleu if calc_bleu else calculate_rouge _A = 'bleu' if calc_bleu else 'rouge' _A = score_fn(_snake_case , _snake_case ) _A = len(_snake_case ) _A = time.time() - start_time _A = round(runtime / metrics['n_obs'] , 4 ) _A = num_replicas # TODO(@stas00): add whatever metadata to metrics _A = save_dir.joinpath(F'''{args.type_path}_{metric_name}.json''' ) save_json(_snake_case , _snake_case , indent=_snake_case ) print(_snake_case ) write_txt_file(_snake_case , save_dir.joinpath(F'''{args.type_path}_generations.txt''' ) ) if args.debug: write_txt_file(_snake_case , save_dir.joinpath(F'''{args.type_path}.target''' ) ) else: shutil.rmtree(_snake_case ) def _snake_case ( _snake_case : Optional[Any] ) -> List: '''simple docstring''' _A = [] for partial_result in partial_results: records.extend(_snake_case ) _A = sorted(_snake_case , key=lambda _snake_case : x["id"] ) _A = [x['pred'] for x in records] return preds def _snake_case ( _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Optional[int] ) -> List[Dict[str, List]]: '''simple docstring''' _A = time.time() logger.info('waiting for all nodes to finish' ) _A = None while (time.time() - start_wait) < timeout: _A = list(save_dir.glob('rank_*.json' ) ) if len(_snake_case ) < num_replicas: continue try: # make sure all json files are fully saved _A = lmap(_snake_case , _snake_case ) return json_data except JSONDecodeError: continue else: raise TimeoutError('Rank 0 gave up on waiting for other processes' ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
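# --- Editor's toy check of the result-merging logic above ---------------------
# `combine_partial_results` (its def is obfuscated above, but the call site
# keeps the name) flattens each rank's record list, sorts by the original
# example id, and returns predictions in dataset order:
partial_results = [
    [{'pred': 'second', 'id': 1}],   # written by rank 0
    [{'pred': 'first', 'id': 0}],    # written by rank 1
]
records = [r for part in partial_results for r in part]
preds = [x['pred'] for x in sorted(records, key=lambda x: x['id'])]
assert preds == ['first', 'second']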
"""simple docstring""" from scipy.stats import spearmanr import datasets a = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' a = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' a = r'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=False ): _A = spearmanr(_UpperCAmelCase , _UpperCAmelCase ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
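# --- Editor's direct check of the wrapped scipy call --------------------------
# The metric above delegates to scipy.stats.spearmanr, which returns the
# correlation and its p-value; the values mirror the doctest in the docstring.
from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 1))      # -0.7
print(round(pvalue, 2))   # 0.19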
"""simple docstring""" from collections.abc import Callable def _snake_case ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) -> float: '''simple docstring''' _A = a _A = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('could not find root in given interval.' ) else: _A = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: _A = mid else: _A = mid _A = start + (end - start) / 2.0 return mid def _snake_case ( _snake_case : float ) -> float: '''simple docstring''' return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_000)) import doctest doctest.testmod()
"""simple docstring""" from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def _snake_case ( _snake_case : Tuple , _snake_case : List[str] ) -> int: '''simple docstring''' _A = [] for part_id in partition_order: _A = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(_snake_case ): expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def _snake_case ( ) -> Dict: '''simple docstring''' _A = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() _A = spark.range(1_00 ).repartition(1 ) _A = Spark(_snake_case ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def _snake_case ( ) -> Dict: '''simple docstring''' _A = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() _A = spark.range(10 ).repartition(2 ) _A = [1, 0] _A = _generate_iterable_examples(_snake_case , _snake_case ) # Reverse the partitions. _A = _get_expected_row_ids_and_row_dicts_for_partition_order(_snake_case , _snake_case ) for i, (row_id, row_dict) in enumerate(generate_fn() ): _A , _A = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def _snake_case ( ) -> Tuple: '''simple docstring''' _A = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() _A = spark.range(10 ).repartition(1 ) _A = SparkExamplesIterable(_snake_case ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(_snake_case ): assert row_id == F'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def _snake_case ( ) -> Optional[int]: '''simple docstring''' _A = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() _A = spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch('numpy.random.Generator' ) as generator_mock: _A = lambda _snake_case : x.reverse() _A = _get_expected_row_ids_and_row_dicts_for_partition_order(_snake_case , [2, 1, 0] ) _A = SparkExamplesIterable(_snake_case ).shuffle_data_sources(_snake_case ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(_snake_case ): _A , _A = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def _snake_case ( ) -> str: '''simple docstring''' _A = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() _A = spark.range(20 ).repartition(4 ) # Partitions 0 and 2 _A = SparkExamplesIterable(_snake_case ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 _A = _get_expected_row_ids_and_row_dicts_for_partition_order(_snake_case , [0, 2] ) for i, (row_id, row_dict) in enumerate(_snake_case ): _A , _A = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 _A = SparkExamplesIterable(_snake_case ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 _A = _get_expected_row_ids_and_row_dicts_for_partition_order(_snake_case , [1, 3] ) for i, (row_id, row_dict) in enumerate(_snake_case ): _A , _A = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def _snake_case ( ) -> Dict: '''simple docstring''' _A = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() _A = spark.range(1_00 ).repartition(1 ) _A = Spark(_snake_case ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 1_00
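# --- Editor's standalone sketch of the helper used by these tests -------------
# Relies on the pyspark import and the helper defined above (its def was
# obfuscated, but the tests call it by its real name). Dataframe size and
# partitioning are illustrative.
spark = pyspark.sql.SparkSession.builder.master('local[*]').appName('pyspark').getOrCreate()
df = spark.range(6).repartition(2)
expected = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 0])
# Each entry pairs a "<partition>_<row>" id with the row as a dict, in the
# requested partition order.
print(expected[0])  # e.g. ('1_0', {'id': ...})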
315
"""simple docstring""" import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(__lowerCAmelCase ) , '''Tatoeba directory does not exist.''' ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : Optional[Any] ): _A = tempfile.mkdtemp() return TatoebaConverter(save_dir=_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Optional[int] ): self.resolver.convert_models(['heb-eng'] ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): _A , _A = self.resolver.write_model_card('opus-mt-he-en' , dry_run=_UpperCAmelCase ) assert mmeta["long_pair"] == "heb-eng"
315
1
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable a = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['''DPTFeatureExtractor'''] a = ['''DPTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ '''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DPTForDepthEstimation''', '''DPTForSemanticSegmentation''', '''DPTModel''', '''DPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
315
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
315
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''', # See all LeViT models at https://huggingface.co/models?filter=levit } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : List[Any] = '''levit''' def __init__( self : Dict , _UpperCAmelCase : List[str]=224 , _UpperCAmelCase : Optional[int]=3 , _UpperCAmelCase : Dict=3 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Dict=1 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : Tuple=[128, 256, 384] , _UpperCAmelCase : int=[4, 8, 12] , _UpperCAmelCase : str=[4, 4, 4] , _UpperCAmelCase : Any=[16, 16, 16] , _UpperCAmelCase : Tuple=0 , _UpperCAmelCase : int=[2, 2, 2] , _UpperCAmelCase : Optional[int]=[2, 2, 2] , _UpperCAmelCase : Optional[Any]=0.02 , **_UpperCAmelCase : Union[str, Any] , ): super().__init__(**_UpperCAmelCase ) _A = image_size _A = num_channels _A = kernel_size _A = stride _A = padding _A = hidden_sizes _A = num_attention_heads _A = depths _A = key_dim _A = drop_path_rate _A = patch_size _A = attention_ratio _A = mlp_ratio _A = initializer_range _A = [ ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Any = version.parse('''1.11''' ) @property def lowerCAmelCase_ ( self : int ): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCAmelCase_ ( self : Dict ): return 1E-4
315
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : tuple[int, int] , _snake_case : int ) -> list[tuple[int, int]]: '''simple docstring''' _A , _A = position _A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] _A = [] for position in positions: _A , _A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(_snake_case ) return permissible_positions def _snake_case ( _snake_case : list[list[int]] ) -> bool: '''simple docstring''' return not any(elem == 0 for row in board for elem in row ) def _snake_case ( _snake_case : list[list[int]] , _snake_case : tuple[int, int] , _snake_case : int ) -> bool: '''simple docstring''' if is_complete(_snake_case ): return True for position in get_valid_pos(_snake_case , len(_snake_case ) ): _A , _A = position if board[y][x] == 0: _A = curr + 1 if open_knight_tour_helper(_snake_case , _snake_case , curr + 1 ): return True _A = 0 return False def _snake_case ( _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [[0 for i in range(_snake_case )] for j in range(_snake_case )] for i in range(_snake_case ): for j in range(_snake_case ): _A = 1 if open_knight_tour_helper(_snake_case , (i, j) , 1 ): return board _A = 0 _A = F'''Open Kight Tour cannot be performed on a board of size {n}''' raise ValueError(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
315
1
"""simple docstring""" import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm a = re.compile('''[^A-Za-z_0-9]''') # parameters used in DuplicationIndex a = 10 a = 256 def _snake_case ( _snake_case : List[str] ) -> Optional[MinHash]: '''simple docstring''' if len(_snake_case ) < MIN_NUM_TOKENS: return None _A = MinHash(num_perm=_snake_case ) for token in set(_snake_case ): min_hash.update(token.encode() ) return min_hash def _snake_case ( _snake_case : str ) -> Set[str]: '''simple docstring''' return {t for t in NON_ALPHA.split(_snake_case ) if len(t.strip() ) > 0} class lowercase_ : '''simple docstring''' def __init__( self : int , *, _UpperCAmelCase : float = 0.85 , ): _A = duplication_jaccard_threshold _A = NUM_PERM _A = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) _A = defaultdict(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : MinHash ): _A = self._index.query(_UpperCAmelCase ) if code_key in self._index.keys: print(F'''Duplicate key {code_key}''' ) return self._index.insert(_UpperCAmelCase , _UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(_UpperCAmelCase ) break else: self._duplicate_clusters[close_duplicates[0]].add(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _A = [] for base, duplicates in self._duplicate_clusters.items(): _A = [base] + list(_UpperCAmelCase ) # reformat the cluster to be a list of dict _A = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster] duplicate_clusters.append(_UpperCAmelCase ) return duplicate_clusters def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Optional[int] ): _A = self.get_duplicate_clusters() with open(_UpperCAmelCase , 'w' ) as f: json.dump(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( _snake_case : Any ) -> Optional[Any]: '''simple docstring''' _A , _A = element _A = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def _snake_case ( _snake_case : Type[Dataset] ) -> List[Any]: '''simple docstring''' with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(_snake_case , max_queue_size=1_00_00 ) , chunksize=1_00 , ): if data is not None: yield data def _snake_case ( _snake_case : Type[Dataset] , _snake_case : float ) -> List[str]: '''simple docstring''' _A = DuplicationIndex(duplication_jaccard_threshold=_snake_case ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_snake_case ) ) , max_queue_size=1_00 ) ): di.add(_snake_case , _snake_case ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def _snake_case ( stra : str , strb : str ) -> float: '''simple docstring''' _A = get_tokens(stra ) _A = get_tokens(strb ) return len(tokensa & tokensb ) / len(tokensa | tokensb ) a = None def _snake_case ( _snake_case : List[str] , _snake_case : List[str] ) -> Tuple: '''simple docstring''' _A = [] for elementa in cluster: _A = _shared_dataset[elementa['base_index']]['content'] for elementb in extremes: _A = _shared_dataset[elementb['base_index']]['content'] if jaccard_similarity(_snake_case , _snake_case ) >= jaccard_threshold: elementb["copies"] += 1 break else: _A = 1 extremes.append(_snake_case ) return extremes def _snake_case ( _snake_case : Tuple , _snake_case : Dict , _snake_case : str ) -> str: '''simple docstring''' global _shared_dataset _A = dataset _A = [] _A = partial(_find_cluster_extremes_shared , jaccard_threshold=_snake_case ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( _snake_case , _snake_case , ) , total=len(_snake_case ) , ): extremes_list.append(_snake_case ) return extremes_list def _snake_case ( _snake_case : Type[Dataset] , _snake_case : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]: '''simple docstring''' _A = make_duplicate_clusters(_snake_case , _snake_case ) _A = {x['base_index'] for cluster in duplicate_clusters for x in cluster} _A = {} _A = find_extremes(_snake_case , _snake_case , _snake_case ) for extremes in extremes_clusters: for element in extremes: _A = element _A = duplicate_indices - set(extreme_dict.keys() ) _A = dataset.filter(lambda example , idx : idx not in remove_indices , with_indices=True ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: _A = element['base_index'] in extreme_dict if element["is_extreme"]: _A = extreme_dict[element['base_index']]['copies'] print(F'''Original dataset size: {len(_snake_case )}''' ) print(F'''Number of duplicate clusters: {len(_snake_case )}''' ) print(F'''Files in duplicate cluster: {len(_snake_case )}''' ) print(F'''Unique files in duplicate cluster: {len(_snake_case )}''' ) print(F'''Filtered dataset size: {len(_snake_case )}''' ) return ds_filter, duplicate_clusters
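# --- Illustrative sketch of the MinHash estimate the deduplication above
# relies on. Self-contained; uses only `datasketch`, which this module
# already imports.
from datasketch import MinHash

def minhash_of(text: str, num_perm: int = 256) -> MinHash:
    m = MinHash(num_perm=num_perm)
    for token in set(text.split()):
        m.update(token.encode())
    return m

ma = minhash_of('def add(a, b): return a + b')
mb = minhash_of('def add(x, y): return x + y')
print(ma.jaccard(mb))  # estimated Jaccard similarity of the two token sets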
315
"""simple docstring""" import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : int , *_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : List[Any] ): super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) _A = eval_examples _A = post_process_function def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str = "eval" ): _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(_UpperCAmelCase ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) else: _A = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_UpperCAmelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCAmelCase ) return metrics def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str = "test" ): _A = self.get_test_dataloader(_UpperCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. 
_A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions , 'predict' ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCAmelCase )
315
1
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a = { '''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''], '''processing_mgp_str''': ['''MgpstrProcessor'''], '''tokenization_mgp_str''': ['''MgpstrTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ '''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MgpstrModel''', '''MgpstrPreTrainedModel''', '''MgpstrForSceneTextRecognition''', ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
315
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ) -> bool: '''simple docstring''' return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
315
1
"""simple docstring""" from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Any ): _A = SMALL_MODEL_IDENTIFIER _A = 'pt' _A = 'tf' def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : List[str] ): _A = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(_UpperCAmelCase ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Tuple ): _A = TFAutoModel.from_pretrained(self.test_model , from_pt=_UpperCAmelCase ) model_tf.save_pretrained(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _A = 'mock_framework' # Framework provided - return whatever the user provides _A = FeaturesManager.determine_framework(self.test_model , _UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(_UpperCAmelCase ) _A = FeaturesManager.determine_framework(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(_UpperCAmelCase ) _A = FeaturesManager.determine_framework(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : str ): # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(_UpperCAmelCase ) _A = FeaturesManager.determine_framework(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(_UpperCAmelCase ) _A = FeaturesManager.determine_framework(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(_UpperCAmelCase ): _A = FeaturesManager.determine_framework(_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _A = MagicMock(return_value=_UpperCAmelCase ) with patch('transformers.onnx.features.is_tf_available' , _UpperCAmelCase ): _A = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_UpperCAmelCase , self.framework_pt ) # PyTorch not in environment -> use TensorFlow _A = MagicMock(return_value=_UpperCAmelCase ) with patch('transformers.onnx.features.is_torch_available' , _UpperCAmelCase ): _A = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_UpperCAmelCase , self.framework_tf ) # Both in environment -> use PyTorch _A = MagicMock(return_value=_UpperCAmelCase ) _A = MagicMock(return_value=_UpperCAmelCase ) with patch('transformers.onnx.features.is_tf_available' , _UpperCAmelCase ), patch( 'transformers.onnx.features.is_torch_available' , _UpperCAmelCase ): _A = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_UpperCAmelCase , self.framework_pt ) # Both not in environment -> raise error _A = MagicMock(return_value=_UpperCAmelCase ) _A = MagicMock(return_value=_UpperCAmelCase ) with patch('transformers.onnx.features.is_tf_available' , _UpperCAmelCase ), patch( 'transformers.onnx.features.is_torch_available' , _UpperCAmelCase ): with 
self.assertRaises(_UpperCAmelCase ): _A = FeaturesManager.determine_framework(self.test_model )
315
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) _A = Vector() def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(_UpperCAmelCase ) , '(0,0,0,0,0,1)' ) def lowerCAmelCase_ ( self : Optional[int] ): _A = Vector([1, 2, 3, 4] ) self.assertEqual(len(_UpperCAmelCase ) , 4 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2] ) _A = Vector([1, 2, 3, 4, 5] ) _A = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) _A = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def lowerCAmelCase_ ( self : str ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) _A = Vector([2, -1, 4] ) # for test of dot product _A = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' ) self.assertEqual((a * b) , 0 ) def lowerCAmelCase_ ( self : Dict ): self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 ) def lowerCAmelCase_ ( self : Tuple ): self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , _UpperCAmelCase , _UpperCAmelCase ) ) , '(3,4,7)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 0, 0, 0, 0, 0] ) _A = x.copy() self.assertEqual(str(_UpperCAmelCase ) , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(_UpperCAmelCase ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : str ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) _A = Vector([1, 2, 3] ) self.assertEqual('(14,32,50)' , str(a * x ) ) self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) 
a.change_component(0 , 2 , 5 ) self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : List[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.01 ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) ) def lowerCAmelCase_ ( self : int ): self.assertEqual( '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
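# --- Illustrative sketch of the `lib` API exercised above (hedged: assumes
# the same Vector/Matrix interface; run it inside the same package as these
# tests so the relative import resolves).
from .lib import Matrix, Vector

v = Vector([1, 2, 3])
w = Vector([1, 0, 1])
print((v + w).component(2))   # 4
m = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
print(m.determinant())        # -5, matching the expectation tested above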
315
1
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar a = TypeVar('''T''') class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : T ): _A = data _A = None def __str__( self : str ): return F'''{self.data}''' class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Tuple ): _A = None def __iter__( self : List[Any] ): _A = self.top while node: yield node.data _A = node.next def __str__( self : Union[str, Any] ): return "->".join([str(_UpperCAmelCase ) for item in self] ) def __len__( self : List[Any] ): return len(tuple(iter(self ) ) ) def lowerCAmelCase_ ( self : str ): return self.top is None def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : T ): _A = Node(_UpperCAmelCase ) if not self.is_empty(): _A = self.top _A = node def lowerCAmelCase_ ( self : Dict ): if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , _UpperCAmelCase ) _A = self.top _A = self.top.next return pop_node.data def lowerCAmelCase_ ( self : Tuple ): if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def lowerCAmelCase_ ( self : Optional[Any] ): _A = None if __name__ == "__main__": from doctest import testmod testmod()
315
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''', } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : int = '''xlnet''' UpperCAmelCase : List[Any] = ['''mems'''] UpperCAmelCase : Any = { '''n_token''': '''vocab_size''', # Backward compatibility '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , _UpperCAmelCase : Dict=32_000 , _UpperCAmelCase : List[str]=1_024 , _UpperCAmelCase : Any=24 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : Union[str, Any]=4_096 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Any=True , _UpperCAmelCase : str="bi" , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1E-1_2 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=512 , _UpperCAmelCase : Dict=None , _UpperCAmelCase : int=True , _UpperCAmelCase : int=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : int=-1 , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Union[str, Any]="last" , _UpperCAmelCase : int=True , _UpperCAmelCase : str="tanh" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Optional[Any]=5 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Dict=2 , **_UpperCAmelCase : int , ): _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( 'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`' ' instead.' , _UpperCAmelCase , ) _A = kwargs['use_cache'] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) @property def lowerCAmelCase_ ( self : Tuple ): logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] ): # Message copied from Transformer-XL documentation raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
315
1
"""simple docstring""" def _snake_case ( _snake_case : list[list[float]] ) -> list[list[float]]: '''simple docstring''' _A = [] for data in source_data: for i, el in enumerate(_snake_case ): if len(_snake_case ) < i + 1: data_lists.append([] ) data_lists[i].append(float(_snake_case ) ) return data_lists def _snake_case ( _snake_case : list[list[float]] , _snake_case : list[int] ) -> list[list[float]]: '''simple docstring''' _A = [] for dlist, weight in zip(_snake_case , _snake_case ): _A = min(_snake_case ) _A = max(_snake_case ) _A = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: _A = F'''Invalid weight of {weight:f} provided''' raise ValueError(_snake_case ) score_lists.append(_snake_case ) return score_lists def _snake_case ( _snake_case : list[list[float]] ) -> list[float]: '''simple docstring''' _A = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(_snake_case ): _A = final_scores[j] + ele return final_scores def _snake_case ( _snake_case : list[list[float]] , _snake_case : list[int] ) -> list[list[float]]: '''simple docstring''' _A = get_data(_snake_case ) _A = calculate_each_score(_snake_case , _snake_case ) _A = generate_final_scores(_snake_case ) # append scores to source data for i, ele in enumerate(_snake_case ): source_data[i].append(_snake_case ) return source_data
315
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed a = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def _snake_case ( _snake_case : Tuple ) -> Dict: '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def _snake_case ( _snake_case : str , _snake_case : List[Any] ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False elif args.student_type == "gpt2": _A = False def _snake_case ( _snake_case : str , _snake_case : int ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False def _snake_case ( ) -> Tuple: '''simple docstring''' _A = argparse.ArgumentParser(description='Training' ) parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' ) parser.add_argument( '--dump_path' , type=_snake_case , required=_snake_case , help='The output directory (log, checkpoints, parameters, etc.)' ) parser.add_argument( '--data_file' , type=_snake_case , required=_snake_case , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , ) parser.add_argument( '--student_type' , type=_snake_case , choices=['distilbert', 'roberta', 'gpt2'] , required=_snake_case , help='The student type (DistilBERT, RoBERTa).' , ) parser.add_argument('--student_config' , type=_snake_case , required=_snake_case , help='Path to the student configuration.' ) parser.add_argument( '--student_pretrained_weights' , default=_snake_case , type=_snake_case , help='Load student initialization checkpoint.' ) parser.add_argument( '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=_snake_case , help='Teacher type (BERT, RoBERTa).' ) parser.add_argument('--teacher_name' , type=_snake_case , required=_snake_case , help='The teacher model.' ) parser.add_argument('--temperature' , default=2.0 , type=_snake_case , help='Temperature for the softmax temperature.' 
) parser.add_argument( '--alpha_ce' , default=0.5 , type=_snake_case , help='Linear weight for the distillation loss. Must be >=0.' ) parser.add_argument( '--alpha_mlm' , default=0.0 , type=_snake_case , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , ) parser.add_argument('--alpha_clm' , default=0.5 , type=_snake_case , help='Linear weight for the CLM loss. Must be >=0.' ) parser.add_argument('--alpha_mse' , default=0.0 , type=_snake_case , help='Linear weight of the MSE loss. Must be >=0.' ) parser.add_argument( '--alpha_cos' , default=0.0 , type=_snake_case , help='Linear weight of the cosine embedding loss. Must be >=0.' ) parser.add_argument( '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' ) parser.add_argument( '--mlm_mask_prop' , default=0.15 , type=_snake_case , help='Proportion of tokens for which we need to make a prediction.' , ) parser.add_argument('--word_mask' , default=0.8 , type=_snake_case , help='Proportion of tokens to mask out.' ) parser.add_argument('--word_keep' , default=0.1 , type=_snake_case , help='Proportion of tokens to keep.' ) parser.add_argument('--word_rand' , default=0.1 , type=_snake_case , help='Proportion of tokens to randomly replace.' ) parser.add_argument( '--mlm_smoothing' , default=0.7 , type=_snake_case , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , ) parser.add_argument('--token_counts' , type=_snake_case , help='The token counts in the data_file for MLM.' ) parser.add_argument( '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , ) parser.add_argument( '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , ) parser.add_argument( '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , ) parser.add_argument('--n_epoch' , type=_snake_case , default=3 , help='Number of pass on the whole dataset.' ) parser.add_argument('--batch_size' , type=_snake_case , default=5 , help='Batch size (for each process).' ) parser.add_argument( '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , ) parser.add_argument( '--gradient_accumulation_steps' , type=_snake_case , default=50 , help='Gradient accumulation for larger training batches.' , ) parser.add_argument('--warmup_prop' , default=0.05 , type=_snake_case , help='Linear warmup proportion.' ) parser.add_argument('--weight_decay' , default=0.0 , type=_snake_case , help='Weight decay if we apply some.' ) parser.add_argument('--learning_rate' , default=5E-4 , type=_snake_case , help='The initial learning rate for Adam.' ) parser.add_argument('--adam_epsilon' , default=1E-6 , type=_snake_case , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , default=5.0 , type=_snake_case , help='Max gradient norm.' ) parser.add_argument('--initializer_range' , default=0.02 , type=_snake_case , help='Random initialization range.' 
) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=_snake_case , default='O1' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_gpu' , type=_snake_case , default=1 , help='Number of GPUs in the node.' ) parser.add_argument('--local_rank' , type=_snake_case , default=-1 , help='Distributed training - Local rank' ) parser.add_argument('--seed' , type=_snake_case , default=56 , help='Random seed' ) parser.add_argument('--log_interval' , type=_snake_case , default=5_00 , help='Tensorboard logging interval.' ) parser.add_argument('--checkpoint_interval' , type=_snake_case , default=40_00 , help='Checkpoint interval.' ) _A = parser.parse_args() sanity_checks(_snake_case ) # ARGS # init_gpu_params(_snake_case ) set_seed(_snake_case ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' ' itUse `--force` if you want to overwrite it' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f: json.dump(vars(_snake_case ) , _snake_case , indent=4 ) git_log(args.dump_path ) _A , _A , _A = MODEL_CLASSES[args.student_type] _A , _A , _A = MODEL_CLASSES[args.teacher_type] # TOKENIZER # _A = teacher_tokenizer_class.from_pretrained(args.teacher_name ) _A = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): _A = tokenizer.all_special_tokens.index(_snake_case ) _A = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) _A = special_tok_ids _A = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , 'rb' ) as fp: _A = pickle.load(_snake_case ) if args.mlm: logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , 'rb' ) as fp: _A = pickle.load(_snake_case ) _A = np.maximum(_snake_case , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): _A = 0.0 # do not predict special tokens _A = torch.from_numpy(_snake_case ) else: _A = None _A = LmSeqsDataset(params=_snake_case , data=_snake_case ) logger.info('Data loader created.' ) # STUDENT # logger.info(F'''Loading student config from {args.student_config}''' ) _A = student_config_class.from_pretrained(args.student_config ) _A = True if args.student_pretrained_weights is not None: logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' ) _A = student_model_class.from_pretrained(args.student_pretrained_weights , config=_snake_case ) else: _A = student_model_class(_snake_case ) if args.n_gpu > 0: student.to(F'''cuda:{args.local_rank}''' ) logger.info('Student loaded.' 
) # TEACHER # _A = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_snake_case ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_snake_case , _snake_case ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_snake_case , _snake_case ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() _A = Distiller( params=_snake_case , dataset=_snake_case , token_probs=_snake_case , student=_snake_case , teacher=_snake_case ) distiller.train() logger.info('Let\'s go get some drinks.' ) if __name__ == "__main__": main()
315
1
"""simple docstring""" from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def _snake_case ( _snake_case : str ) -> None: '''simple docstring''' _A , _A = analyze_text(_snake_case ) _A = list(' ' + ascii_lowercase ) # what is our total sum of probabilities. _A = sum(single_char_strings.values() ) # one length string _A = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: _A = single_char_strings[ch] _A = my_str / all_sum my_fir_sum += prob * math.loga(_snake_case ) # entropy formula. # print entropy print(F'''{round(-1 * my_fir_sum ):.1f}''' ) # two len string _A = sum(two_char_strings.values() ) _A = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: _A = cha + cha if sequence in two_char_strings: _A = two_char_strings[sequence] _A = int(_snake_case ) / all_sum my_sec_sum += prob * math.loga(_snake_case ) # print second entropy print(F'''{round(-1 * my_sec_sum ):.1f}''' ) # print the difference between them print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' ) def _snake_case ( _snake_case : str ) -> tuple[dict, dict]: '''simple docstring''' _A = Counter() # type: ignore _A = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(_snake_case ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def _snake_case ( ) -> Any: '''simple docstring''' import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
315
"""simple docstring""" from manim import * class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Dict ): _A = Rectangle(height=0.5 , width=0.5 ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _A = Rectangle(height=0.25 , width=0.25 ) _A = [mem.copy() for i in range(6 )] _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('CPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(4 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('GPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Model' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(_UpperCAmelCase ) _A = [] _A = [] for i, rect in enumerate(_UpperCAmelCase ): _A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 ) target.move_to(_UpperCAmelCase ) model_arr.append(_UpperCAmelCase ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(_UpperCAmelCase ) self.add(*_UpperCAmelCase , *_UpperCAmelCase ) _A = [meta_mem.copy() for i in range(6 )] _A = [meta_mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Disk' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) disk.move_to([-4, -1.25, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _A = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(_UpperCAmelCase ) _A = MarkupText( F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase ) ) _A = Square(0.3 ) input.set_fill(_UpperCAmelCase , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 ) self.play(Write(_UpperCAmelCase ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 ) self.play(MoveToTarget(_UpperCAmelCase ) ) self.play(FadeOut(_UpperCAmelCase ) ) _A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 ) a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 ) model_cpu_arr[0].generate_target() 
model_cpu_arr[0].target.move_to(gpu_rect[0] ) _A = MarkupText( F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) ) _A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) _A = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) _A = AnimationGroup( FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 ) self.play(_UpperCAmelCase ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: _A = 0.7 self.play( Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) _A = a_c _A = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , ) _A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) ) self.wait()
315
1
"""simple docstring""" import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() a = logging.get_logger() @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : nn.Module UpperCAmelCase : List[nn.Module] = field(default_factory=__lowerCAmelCase ) UpperCAmelCase : list = field(default_factory=__lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tensor , _UpperCAmelCase : Tensor ): _A = len(list(m.modules() ) ) == 1 or isinstance(_UpperCAmelCase , nn.Convad ) or isinstance(_UpperCAmelCase , nn.BatchNormad ) if has_not_submodules: self.traced.append(_UpperCAmelCase ) def __call__( self : List[str] , _UpperCAmelCase : Tensor ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(_UpperCAmelCase ) [x.remove() for x in self.handles] return self @property def lowerCAmelCase_ ( self : Union[str, Any] ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda _UpperCAmelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : nn.Module UpperCAmelCase : nn.Module UpperCAmelCase : int = 0 UpperCAmelCase : List = field(default_factory=__lowerCAmelCase ) UpperCAmelCase : List = field(default_factory=__lowerCAmelCase ) def __call__( self : List[str] , _UpperCAmelCase : Tensor ): _A = Tracker(self.dest )(_UpperCAmelCase ).parametrized _A = Tracker(self.src )(_UpperCAmelCase ).parametrized _A = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.src_skip , _UpperCAmelCase ) ) _A = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.dest_skip , _UpperCAmelCase ) ) if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): raise Exception( F'''Numbers of operations are different. Source module has {len(_UpperCAmelCase )} operations while''' F''' destination module has {len(_UpperCAmelCase )}.''' ) for dest_m, src_m in zip(_UpperCAmelCase , _UpperCAmelCase ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) def _snake_case ( _snake_case : str , _snake_case : ResNetConfig , _snake_case : Path , _snake_case : bool = True ) -> str: '''simple docstring''' print(F'''Converting {name}...''' ) with torch.no_grad(): _A = timm.create_model(_snake_case , pretrained=_snake_case ).eval() _A = ResNetForImageClassification(_snake_case ).eval() _A = ModuleTransfer(src=_snake_case , dest=_snake_case ) _A = torch.randn((1, 3, 2_24, 2_24) ) module_transfer(_snake_case ) assert torch.allclose(from_model(_snake_case ) , our_model(_snake_case ).logits ), "The model logits don't match the original one." 
_A = F'''resnet{"-".join(name.split("resnet" ) )}''' print(_snake_case ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=_snake_case , ) # we can use the convnext one _A = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=_snake_case , ) print(F'''Pushed {checkpoint_name}''' ) def _snake_case ( _snake_case : Path , _snake_case : str = None , _snake_case : bool = True ) -> str: '''simple docstring''' _A = 'imagenet-1k-id2label.json' _A = 10_00 _A = (1, num_labels) _A = 'huggingface/label-files' _A = num_labels _A = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='dataset' ) , 'r' ) ) _A = {int(_snake_case ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} _A = partial(_snake_case , num_labels=_snake_case , idalabel=_snake_case , labelaid=_snake_case ) _A = { 'resnet18': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='basic' ), 'resnet26': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ), 'resnet34': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='basic' ), 'resnet50': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ), 'resnet101': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ), 'resnet152': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ), } if model_name: convert_weight_and_push(_snake_case , names_to_config[model_name] , _snake_case , _snake_case ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(_snake_case , _snake_case , _snake_case , _snake_case ) return config, expected_shape if __name__ == "__main__": a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,''' ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) a = parser.parse_args() a = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""simple docstring""" def _snake_case ( _snake_case : list , _snake_case : int = 0 ) -> list: '''simple docstring''' _A = length or len(_snake_case ) _A = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: _A , _A = list_data[i + 1], list_data[i] _A = True return list_data if not swapped else bubble_sort(_snake_case , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Tuple = 1 @register_to_config def __init__( self : Optional[int] , _UpperCAmelCase : Any=2_000 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[Any]=20 , _UpperCAmelCase : List[str]=1E-3 ): _A = None _A = None _A = None def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, torch.device] = None ): _A = torch.linspace(1 , self.config.sampling_eps , _UpperCAmelCase , device=_UpperCAmelCase ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=None ): if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score _A = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) _A = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) _A = std.flatten() while len(std.shape ) < len(score.shape ): _A = std.unsqueeze(-1 ) _A = -score / std # compute _A = -1.0 / len(self.timesteps ) _A = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) _A = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): _A = beta_t.unsqueeze(-1 ) _A = -0.5 * beta_t * x _A = torch.sqrt(_UpperCAmelCase ) _A = drift - diffusion**2 * score _A = x + drift * dt # add noise _A = randn_tensor(x.shape , layout=x.layout , generator=_UpperCAmelCase , device=x.device , dtype=x.dtype ) _A = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self : Optional[int] ): return self.config.num_train_timesteps
"""simple docstring""" import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Any = ['''input_values''', '''attention_mask'''] def __init__( self : Dict , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 16_000 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 80 , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : str = "hann_window" , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 7_600 , _UpperCAmelCase : float = 1E-1_0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , **_UpperCAmelCase : List[Any] , ): super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase ) _A = do_normalize _A = return_attention_mask _A = num_mel_bins _A = hop_length _A = win_length _A = win_function _A = frame_signal_scale _A = fmin _A = fmax _A = mel_floor _A = reduction_factor _A = win_length * sampling_rate // 1_000 _A = hop_length * sampling_rate // 1_000 _A = optimal_fft_length(self.sample_size ) _A = (self.n_fft // 2) + 1 _A = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase ) _A = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , ) if frame_signal_scale != 1.0: warnings.warn( 'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) if reduction_factor != 2.0: warnings.warn( 'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def lowerCAmelCase_ ( _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : float = 0.0 ): if attention_mask is not None: _A = np.array(_UpperCAmelCase , np.intaa ) _A = [] for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1 ) ): _A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: _A = padding_value normed_input_values.append(_UpperCAmelCase ) else: _A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , ): _A = spectrogram( _UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , ) return log_mel_spec.T def __call__( self : int , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , 
_UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ): if audio is None and audio_target is None: raise ValueError('You must provide either `audio` or `audio_target` values.' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if audio is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) else: _A = None if audio_target is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) if inputs is None: return inputs_target else: _A = inputs_target['input_values'] _A = inputs_target.get('attention_mask' ) if decoder_attention_mask is not None: _A = decoder_attention_mask return inputs def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ): _A = isinstance(_UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _A = is_batched_numpy or ( isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ): _A = np.asarray(_UpperCAmelCase , dtype=np.floataa ) elif isinstance(_UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): _A = speech.astype(np.floataa ) # always return batch if not is_batched: _A = [speech] # needed to make pad() work on spectrogram inputs _A = self.feature_size # convert into correct format for padding if is_target: _A = [self._extract_mel_features(_UpperCAmelCase ) for waveform in speech] _A = BatchFeature({'input_values': features} ) _A = self.num_mel_bins else: _A = BatchFeature({'input_values': speech} ) _A = self.pad( _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , ) _A = feature_size_hack # convert input values to correct format _A = padded_inputs['input_values'] if not isinstance(input_values[0] , np.ndarray ): _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for array in input_values] elif ( not 
isinstance(_UpperCAmelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): _A = [array.astype(np.floataa ) for array in input_values] elif isinstance(_UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): _A = input_values.astype(np.floataa ) # convert attention_mask to correct format _A = padded_inputs.get('attention_mask' ) if attention_mask is not None: _A = [np.asarray(_UpperCAmelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: _A = ( attention_mask if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) _A = self.zero_mean_unit_var_norm( padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value ) if return_tensors is not None: _A = padded_inputs.convert_to_tensors(_UpperCAmelCase ) return padded_inputs def lowerCAmelCase_ ( self : Any ): _A = super().to_dict() # Don't serialize these as they are derived from the other properties. _A = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs'] for name in names: if name in output: del output[name] return output
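# Illustrative use of the feature extractor above, assuming it is available as
# transformers.SpeechT5FeatureExtractor; the waveform is stand-in audio, not a
# value from this file:
import numpy as np

from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()
waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = extractor(audio=waveform, sampling_rate=16_000, return_tensors="np")
print(inputs["input_values"].shape)  # (1, 16000)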
"""simple docstring""" # Lint as: python3 import itertools import os import re a = re.compile(r'''([A-Z]+)([A-Z][a-z])''') a = re.compile(r'''([a-z\d])([A-Z])''') a = re.compile(r'''(?<!_)_(?!_)''') a = re.compile(r'''(_{2,})''') a = r'''^\w+(\.\w+)*$''' a = r'''<>:/\|?*''' def _snake_case ( _snake_case : Union[str, Any] ) -> Dict: '''simple docstring''' _A = _uppercase_uppercase_re.sub(R'\1_\2' , _snake_case ) _A = _lowercase_uppercase_re.sub(R'\1_\2' , _snake_case ) return name.lower() def _snake_case ( _snake_case : str ) -> Dict: '''simple docstring''' _A = _single_underscore_re.split(_snake_case ) _A = [_multiple_underscores_re.split(_snake_case ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(_snake_case ) if n != '' ) def _snake_case ( _snake_case : List[Any] ) -> int: '''simple docstring''' if os.path.basename(_snake_case ) != name: raise ValueError(F'''Should be a dataset name, not a path: {name}''' ) return camelcase_to_snakecase(_snake_case ) def _snake_case ( _snake_case : Optional[Any] , _snake_case : int ) -> Dict: '''simple docstring''' if os.path.basename(_snake_case ) != name: raise ValueError(F'''Should be a dataset name, not a path: {name}''' ) if not re.match(_split_re , _snake_case ): raise ValueError(F'''Split name should match \'{_split_re}\'\' but got \'{split}\'.''' ) return F'''{filename_prefix_for_name(_snake_case )}-{split}''' def _snake_case ( _snake_case : List[Any] , _snake_case : int , _snake_case : List[str] , _snake_case : List[str]=None ) -> Optional[Any]: '''simple docstring''' _A = filename_prefix_for_split(_snake_case , _snake_case ) if filetype_suffix: prefix += F'''.{filetype_suffix}''' _A = os.path.join(_snake_case , _snake_case ) return F'''{filepath}*''' def _snake_case ( _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Dict , _snake_case : List[Any]=None , _snake_case : int=None ) -> List[Any]: '''simple docstring''' _A = filename_prefix_for_split(_snake_case , _snake_case ) _A = os.path.join(_snake_case , _snake_case ) if shard_lengths: _A = len(_snake_case ) _A = [F'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(_snake_case )] if filetype_suffix: _A = [filename + F'''.{filetype_suffix}''' for filename in filenames] return filenames else: _A = prefix if filetype_suffix: filename += F'''.{filetype_suffix}''' return [filename]
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : int , _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [] create_all_state(1 , _snake_case , _snake_case , [] , _snake_case ) return result def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : list[int] , _snake_case : list[list[int]] , ) -> None: '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(_snake_case , total_number - level + 2 ): current_list.append(_snake_case ) create_all_state(i + 1 , _snake_case , level - 1 , _snake_case , _snake_case ) current_list.pop() def _snake_case ( _snake_case : list[list[int]] ) -> None: '''simple docstring''' for i in total_list: print(*_snake_case ) if __name__ == "__main__": a = 4 a = 2 a = generate_all_combinations(n, k) print_all_state(total_list)
"""simple docstring""" def _snake_case ( _snake_case : int ) -> int: '''simple docstring''' _A = [[0 for _ in range(_snake_case )] for _ in range(m + 1 )] for i in range(m + 1 ): _A = 1 for n in range(m + 1 ): for k in range(1 , _snake_case ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: a = int(input('''Enter a number: ''').strip()) print(partition(n)) except ValueError: print('''Please enter a number.''') else: try: a = int(sys.argv[1]) print(partition(n)) except ValueError: print('''Please pass a number.''')
"""simple docstring""" def _snake_case ( _snake_case : int = 10_00 ) -> int: '''simple docstring''' return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())