import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_DESCRIPTION = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (NeurIPS, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_KWARGS_DESCRIPTION = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n    predictions: list of generated text to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of references for each prediction. Each\n        reference should be a string with tokens separated by spaces.\nOptional Args:\n    num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n    kmeans_max_iter: maximum number of k-means iterations. Default 500\n    featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\'. Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n    max_text_length: maximum number of tokens to consider. Default 1024\n    divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n    mauve_scaling_factor: "c" from the paper. Default 5.\n    verbose: If True (default), print running time updates\n    seed: random seed to initialize k-means cluster assignments.\nReturns:\n    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n    q_hist: same as above, but with q_text.\nExamples:\n\n    >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n    >>> import datasets\n    >>> mauve = datasets.load_metric(\'mauve\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n    >>> print(out.mauve) # doctest: +SKIP\n    1.0\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    """Wrapper around the official MAUVE implementation (mauve-text)."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
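

# ---------------------------------------------------------------------------
# Usage sketch (an addition, not part of the original metric file). It assumes
# the mauve-text extras (faiss, sklearn, ...) are installed; the featurizer
# choice and toy strings below are illustrative only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    mauve = datasets.load_metric("mauve")
    out = mauve.compute(
        predictions=["hello there", "general kenobi"],
        references=["hello there", "general kenobi"],
        featurize_model_name="gpt2",  # smaller than the default gpt2-large
        device_id=-1,  # no GPU with this id, so featurization falls back to CPU
    )
    print(out.mauve)  # identical texts should score close to 1.0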
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

class SpeechToImagePipeline(DiffusionPipeline):
    """Generate images from speech: Whisper transcribes the audio into a text
    prompt, then a Stable Diffusion UNet denoises latents conditioned on it."""

    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # transcribe the audio into a text prompt
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(
            predicted_ids, skip_special_tokens=True, normalize=True
        )[0]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        # decode the latents back into an image
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
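

# ---------------------------------------------------------------------------
# Usage sketch (an addition, not part of the original pipeline file). The
# checkpoint names are assumptions for illustration; any Whisper checkpoint and
# any Stable Diffusion checkpoint with the expected sub-modules should work,
# and the audio clip comes from a small public test dataset.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from datasets import load_dataset

    device = "cuda" if torch.cuda.is_available() else "cpu"
    audio = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")[0]["audio"]

    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        custom_pipeline="speech_to_image_diffusion",
        speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
        speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
    ).to(device)

    image = pipe(audio["array"], sampling_rate=audio["sampling_rate"]).images[0]
    image.save("speech_to_image.png")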
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType

class Pix2StructProcessor(ProcessorMixin):
    """Wraps a Pix2Struct image processor and a T5 tokenizer into one processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
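

# ---------------------------------------------------------------------------
# Usage sketch (an addition, not part of the original processor file). The
# checkpoint name and file path are illustrative assumptions, and the sketch
# presumes the processor is loaded through the transformers package rather
# than this standalone module.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from PIL import Image
    from transformers import Pix2StructProcessor as HubPix2StructProcessor

    processor = HubPix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
    image = Image.open("screenshot.png")
    # Image-only call: returns flattened_patches (plus their attention mask).
    inputs = processor(images=image, return_tensors="pt")
    # Text passed alongside images becomes decoder inputs for training.
    batch = processor(images=image, text="a caption", return_tensors="pt")
    print(sorted(batch.keys()))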
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list that only admits edges of weight 0 or 1."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: zero-weight edges go to the front of the deque, one-weight
        edges to the back, so vertices are settled in distance order."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
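

# ---------------------------------------------------------------------------
# Example (an addition, not part of the original module): a five-vertex graph
# with 0/1 weights. The path 0 -> 1 -> 2 -> 4 crosses one weight-1 edge and two
# weight-0 edges, so its distance of 1 beats the 0 -> 3 -> 4 route of cost 2.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    graph = AdjacencyList(5)
    graph.add_edge(0, 1, 1)
    graph.add_edge(1, 2, 0)
    graph.add_edge(2, 4, 0)
    graph.add_edge(0, 3, 1)
    graph.add_edge(3, 4, 1)
    assert graph.get_shortest_path(0, 4) == 1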
from math import asin, atan, cos, radians, sin, sqrt, tan

# Constants per WGS84: https://en.wikipedia.org/wiki/World_Geodetic_System
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Return the great-circle distance in meters between two points on Earth."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
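

# ---------------------------------------------------------------------------
# Example (an addition, not part of the original module): the distance between
# downtown San Francisco and the entrance to Yosemite National Park is roughly
# 254 km; the coordinates are illustrative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    SAN_FRANCISCO = (37.774856, -122.424227)
    YOSEMITE = (37.864742, -119.537521)
    print(f"{haversine_distance(*SAN_FRANCISCO, *YOSEMITE) / 1000:.1f} km")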
class SubArray:
    """Maximum-sum contiguous subarray over a comma-separated string of integers."""

    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        # sum_value[i]: best sum of a subarray ending at i; rear[i]: best sum seen so far
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the result is:", re))
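
# ---------------------------------------------------------------------------
# Example (an addition, not part of the original module): for "1,-2,3,4,-1" the
# maximum-sum contiguous subarray is [3, 4], so solve_sub_array returns 7.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    assert SubArray("1,-2,3,4,-1").solve_sub_array() == 7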
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer

class InstructBlipProcessor(ProcessorMixin):
    """Wraps a BLIP image processor, a language-model tokenizer, and a Q-Former
    tokenizer into a single InstructBLIP processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the language tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the language tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
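

# ---------------------------------------------------------------------------
# Usage sketch (an addition, not part of the original processor file). The
# checkpoint name and image path are illustrative assumptions.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from PIL import Image

    processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
    image = Image.open("photo.jpg")
    inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
    # `inputs` holds pixel_values, input_ids/attention_mask for the language
    # model, and qformer_input_ids/qformer_attention_mask for the Q-Former.
    print(sorted(inputs.keys()))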
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """Return the least row length n at which the fill-count function, counting
    rows tiled with black blocks of at least `min_block_length` separated by at
    least one grey square, first exceeds one million."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
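
# ---------------------------------------------------------------------------
# Sanity checks (an addition, with values taken from the Project Euler 115
# statement): with a minimum block length of 3 the fill-count function first
# exceeds one million at n = 30, and with a minimum block length of 10 at n = 57.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    assert solution(3) == 30
    assert solution(10) == 57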
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor

class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: VQA mode requires header text
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input: the 4th channel is dropped by RGB conversion
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
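

# ---------------------------------------------------------------------------
# Note (an addition, not part of the original test file): the
# `expected_hidden_dim` checked above is the flattened patch plus two position
# features, since Pix2Struct prepends each patch's row and column index to its
# pixel values. For the default 16x16 patches over RGB images:
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    patch_height, patch_width, num_channels = 16, 16, 3
    assert patch_height * patch_width * num_channels + 2 == 770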
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

class ClapFeatureExtractor(SequenceFeatureExtractor):
    """Extracts (optionally fused) log-mel spectrogram features for CLAP."""

    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1_024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length+hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
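

# ---------------------------------------------------------------------------
# Usage sketch (an addition, not part of the original file): extracting fused
# mel features from a synthetic 15-second clip at the 48 kHz rate the extractor
# expects. The waveform and the commented shapes are illustrative assumptions.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    feature_extractor = ClapFeatureExtractor()
    waveform = np.random.randn(15 * 48_000)  # longer than the 10 s window, so "fusion" truncation kicks in
    features = feature_extractor(waveform, sampling_rate=48_000, return_tensors="pt")
    print(features["input_features"].shape)  # (batch, 4, frames, 64): three random chunks plus one shrunk copy
    print(features["is_longer"])  # True because the clip exceeds 10 s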
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
A__ : Optional[Any] = NewType('DataClass', Any)
A__ : List[Any] = NewType('DataClassType', Any)
def a ( lowerCamelCase_ ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = {str(lowerCamelCase_ ): choice for choice in choices}
return lambda lowerCamelCase_ : str_to_choice.get(lowerCamelCase_ , lowerCamelCase_ )
def a ( *,
lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = dataclasses.MISSING , lowerCamelCase_ = dataclasses.MISSING , lowerCamelCase_ = None , **lowerCamelCase_ , ):
'''simple docstring'''
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
lowercase__ = {}
if aliases is not None:
lowercase__ = aliases
if help is not None:
lowercase__ = help
return dataclasses.field(metadata=lowerCamelCase_ , default=lowerCamelCase_ , default_factory=lowerCamelCase_ , **lowerCamelCase_ )
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = 42
def __init__( self : List[Any], lowerCamelCase : Union[DataClassType, Iterable[DataClassType]], **lowerCamelCase : str ):
'''simple docstring'''
# To make the default appear when using --help
if "formatter_class" not in kwargs:
lowercase__ = ArgumentDefaultsHelpFormatter
super().__init__(**lowerCamelCase )
if dataclasses.is_dataclass(lowerCamelCase ):
lowercase__ = [dataclass_types]
lowercase__ = list(lowerCamelCase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(lowerCamelCase )
@staticmethod
def lowercase__ ( lowerCamelCase : ArgumentParser, lowerCamelCase : dataclasses.Field ):
'''simple docstring'''
lowercase__ = F"""--{field.name}"""
lowercase__ = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type, lowerCamelCase ):
raise RuntimeError(
'''Unresolved type detected, which should have been done with the help of '''
'''`typing.get_type_hints` method by default''' )
lowercase__ = kwargs.pop('''aliases''', [] )
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = [aliases]
lowercase__ = getattr(field.type, '''__origin__''', field.type )
if origin_type is Union or (hasattr(lowerCamelCase, '''UnionType''' ) and isinstance(lowerCamelCase, types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(lowerCamelCase ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
F""" Problem encountered in field '{field.name}'.""" )
if type(lowerCamelCase ) not in field.type.__args__:
# filter `str` in Union
lowercase__ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
lowercase__ = getattr(field.type, '''__origin__''', field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
lowercase__ = (
field.type.__args__[0] if isinstance(lowerCamelCase, field.type.__args__[1] ) else field.type.__args__[1]
)
lowercase__ = getattr(field.type, '''__origin__''', field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
lowercase__ = {}
if origin_type is Literal or (isinstance(field.type, lowerCamelCase ) and issubclass(field.type, lowerCamelCase )):
if origin_type is Literal:
lowercase__ = field.type.__args__
else:
lowercase__ = [x.value for x in field.type]
lowercase__ = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
lowercase__ = field.default
else:
lowercase__ = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
lowercase__ = copy(lowerCamelCase )
# Hack because type=bool in argparse does not behave as we want.
lowercase__ = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
lowercase__ = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
lowercase__ = default
# This tells argparse we accept 0 or 1 value after --field_name
lowercase__ = '''?'''
# This is the value that will get picked if we do --field_name (without value)
lowercase__ = True
elif isclass(lowerCamelCase ) and issubclass(lowerCamelCase, lowerCamelCase ):
lowercase__ = field.type.__args__[0]
lowercase__ = '''+'''
if field.default_factory is not dataclasses.MISSING:
lowercase__ = field.default_factory()
elif field.default is dataclasses.MISSING:
lowercase__ = True
else:
lowercase__ = field.type
if field.default is not dataclasses.MISSING:
lowercase__ = field.default
elif field.default_factory is not dataclasses.MISSING:
lowercase__ = field.default_factory()
else:
lowercase__ = True
parser.add_argument(lowerCamelCase, *lowerCamelCase, **lowerCamelCase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
lowercase__ = False
parser.add_argument(F"""--no_{field.name}""", action='''store_false''', dest=field.name, **lowerCamelCase )
def lowercase__ ( self : str, lowerCamelCase : DataClassType ):
'''simple docstring'''
if hasattr(lowerCamelCase, '''_argument_group_name''' ):
lowercase__ = self.add_argument_group(dtype._argument_group_name )
else:
lowercase__ = self
try:
lowercase__ = get_type_hints(lowerCamelCase )
except NameError:
raise RuntimeError(
F"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'''removing line of `from __future__ import annotations` which opts in Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(lowerCamelCase ):
lowercase__ = '''.'''.join(map(lowerCamelCase, sys.version_info[:3] ) )
raise RuntimeError(
F"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'''line of `from __future__ import annotations` which opts in union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
'''support Python versions that lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(lowerCamelCase ):
if not field.init:
continue
lowercase__ = type_hints[field.name]
self._parse_dataclass_field(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Tuple, lowerCamelCase : List[str]=None, lowerCamelCase : Tuple=False, lowerCamelCase : List[str]=True, lowerCamelCase : Dict=None, lowerCamelCase : Union[str, Any]=None, ):
'''simple docstring'''
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
lowercase__ = []
if args_filename:
args_files.append(Path(lowerCamelCase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
lowercase__ = ArgumentParser()
args_file_parser.add_argument(lowerCamelCase, type=lowerCamelCase, action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
lowercase__ , lowercase__ = args_file_parser.parse_known_args(args=lowerCamelCase )
lowercase__ = vars(lowerCamelCase ).get(args_file_flag.lstrip('''-''' ), lowerCamelCase )
if cmd_args_file_paths:
args_files.extend([Path(lowerCamelCase ) for p in cmd_args_file_paths] )
lowercase__ = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
lowercase__ = file_args + args if args is not None else file_args + sys.argv[1:]
lowercase__ , lowercase__ = self.parse_known_args(args=lowerCamelCase )
lowercase__ = []
for dtype in self.dataclass_types:
lowercase__ = {f.name for f in dataclasses.fields(lowerCamelCase ) if f.init}
lowercase__ = {k: v for k, v in vars(lowerCamelCase ).items() if k in keys}
for k in keys:
delattr(lowerCamelCase, lowerCamelCase )
lowercase__ = dtype(**lowerCamelCase )
outputs.append(lowerCamelCase )
if len(namespace.__dict__ ) > 0:
            # arguments that were not consumed by any dataclass end up in an additional namespace.
outputs.append(lowerCamelCase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
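    # Precedence sketch for the parsing above: argparse lets later occurrences of
    # the same destination win, so values resolve as
    #     dataclass defaults < *.args file / --args_file entries (in order) < explicit CLI flags.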
def lowercase__ ( self : List[Any], lowerCamelCase : Dict[str, Any], lowerCamelCase : bool = False ):
'''simple docstring'''
lowercase__ = set(args.keys() )
lowercase__ = []
for dtype in self.dataclass_types:
lowercase__ = {f.name for f in dataclasses.fields(lowerCamelCase ) if f.init}
lowercase__ = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
lowercase__ = dtype(**lowerCamelCase )
outputs.append(lowerCamelCase )
if not allow_extra_keys and unused_keys:
raise ValueError(F"""Some keys are not used by the HfArgumentParser: {sorted(lowerCamelCase )}""" )
return tuple(lowerCamelCase )
def lowercase__ ( self : Optional[Any], lowerCamelCase : str, lowerCamelCase : bool = False ):
'''simple docstring'''
with open(Path(lowerCamelCase ), encoding='''utf-8''' ) as open_json_file:
lowercase__ = json.loads(open_json_file.read() )
lowercase__ = self.parse_dict(lowerCamelCase, allow_extra_keys=lowerCamelCase )
return tuple(lowerCamelCase )
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : bool = False ):
'''simple docstring'''
lowercase__ = self.parse_dict(yaml.safe_load(Path(lowerCamelCase ).read_text() ), allow_extra_keys=lowerCamelCase )
return tuple(lowerCamelCase )
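# Illustrative usage sketch: this class mirrors `transformers.HfArgumentParser`,
# so the equivalent upstream calls look like the commented lines below.
# `ModelArguments` is a hypothetical dataclass introduced only for this example.
#
#     from dataclasses import dataclass, field
#     from transformers import HfArgumentParser
#
#     @dataclass
#     class ModelArguments:
#         model_name: str = field(default="bert-base-uncased")
#         do_train: bool = field(default=True)  # a True default also yields a --no_do_train flag
#
#     parser = HfArgumentParser(ModelArguments)
#     (model_args,) = parser.parse_args_into_dataclasses()
#     # or from a file: (model_args,) = parser.parse_json_file("args.json")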
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = None
lowercase__ = None
def a ( ):
'''simple docstring'''
lowercase__ = Node(1 )
lowercase__ = Node(2 )
lowercase__ = Node(3 )
lowercase__ = Node(4 )
lowercase__ = Node(5 )
return tree
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ ):
'''simple docstring'''
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(lowerCamelCase_ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 0
return output
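# For the intended 5-node tree from make_tree() (1 at the root, 2 and 3 as its
# children, 4 and 5 under 2), the zigzag traversal above yields
# [[1], [3, 2], [4, 5]]: level 1 left-to-right, level 2 right-to-left,
# level 3 left-to-right again.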
def a ( ): # Main function for testing.
'''simple docstring'''
lowercase__ = make_tree()
print(F"""In-order Traversal: {inorder(lowerCamelCase_ )}""" )
print(F"""Pre-order Traversal: {preorder(lowerCamelCase_ )}""" )
print(F"""Post-order Traversal: {postorder(lowerCamelCase_ )}""" , '''\n''' )
print(F"""Height of Tree: {height(lowerCamelCase_ )}""" , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(lowerCamelCase_ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(lowerCamelCase_ ) + 1 ):
print(F"""Level {level}:""" , get_nodes_from_left_to_right(lowerCamelCase_ , level=lowerCamelCase_ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
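# Minimal hand-wired check of the helpers above (the node attributes `data`,
# `left` and `right` follow the usage in the traversal functions):
#
#     root = Node(1)
#     root.left, root.right = Node(2), Node(3)
#     root.left.left, root.left.right = Node(4), Node(5)
#     assert preorder(root) == [1, 2, 4, 5, 3]
#     assert inorder(root) == [4, 2, 5, 1, 3]
#     assert postorder(root) == [4, 5, 2, 3, 1]
#     assert height(root) == 3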
| 671 | 1 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = CodeGenTokenizer
lowercase__ = CodeGenTokenizerFast
lowercase__ = True
lowercase__ = {"""add_prefix_space""": True}
lowercase__ = False
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
lowercase__ = dict(zip(lowerCamelCase, range(len(lowerCamelCase ) ) ) )
lowercase__ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowercase__ = {'''unk_token''': '''<unk>'''}
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase ) + '''\n''' )
with open(self.merges_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCamelCase ) )
def lowercase__ ( self : Tuple, **lowerCamelCase : List[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : List[Any], **lowerCamelCase : List[str] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : Dict, lowerCamelCase : Dict ):
'''simple docstring'''
lowercase__ = '''lower newer'''
lowercase__ = '''lower newer'''
return input_text, output_text
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
lowercase__ = '''lower newer'''
lowercase__ = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowercase__ = tokenizer.tokenize(lowerCamelCase, add_prefix_space=lowerCamelCase )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
lowercase__ = tokens + [tokenizer.unk_token]
lowercase__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ), lowerCamelCase )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase )
lowercase__ = '''lower newer'''
# Testing tokenization
lowercase__ = tokenizer.tokenize(lowerCamelCase, add_prefix_space=lowerCamelCase )
lowercase__ = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
# Testing conversion to ids without special tokens
lowercase__ = tokenizer.encode(lowerCamelCase, add_special_tokens=lowerCamelCase, add_prefix_space=lowerCamelCase )
lowercase__ = rust_tokenizer.encode(lowerCamelCase, add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
# Testing conversion to ids with special tokens
lowercase__ = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase )
lowercase__ = tokenizer.encode(lowerCamelCase, add_prefix_space=lowerCamelCase )
lowercase__ = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
# Testing the unknown token
lowercase__ = tokens + [rust_tokenizer.unk_token]
lowercase__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCamelCase ), lowerCamelCase )
def lowercase__ ( self : List[str], *lowerCamelCase : str, **lowerCamelCase : Dict ):
'''simple docstring'''
        # It's very difficult to mix/test pretokenization with byte-level BPE
        # and get both CodeGen and RoBERTa to work at the same time (mostly an issue of adding a space before the string)
pass
def lowercase__ ( self : Any, lowerCamelCase : Any=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
# Simple input
lowercase__ = '''This is a simple input'''
lowercase__ = ['''This is a simple input 1''', '''This is a simple input 2''']
lowercase__ = ('''This is a simple input''', '''This is a pair''')
lowercase__ = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(lowerCamelCase, tokenizer_r.encode, lowerCamelCase, max_length=lowerCamelCase, padding='''max_length''' )
# Simple input
self.assertRaises(lowerCamelCase, tokenizer_r.encode_plus, lowerCamelCase, max_length=lowerCamelCase, padding='''max_length''' )
# Simple input
self.assertRaises(
lowerCamelCase, tokenizer_r.batch_encode_plus, lowerCamelCase, max_length=lowerCamelCase, padding='''max_length''', )
# Pair input
self.assertRaises(lowerCamelCase, tokenizer_r.encode, lowerCamelCase, max_length=lowerCamelCase, padding='''max_length''' )
# Pair input
self.assertRaises(lowerCamelCase, tokenizer_r.encode_plus, lowerCamelCase, max_length=lowerCamelCase, padding='''max_length''' )
# Pair input
self.assertRaises(
lowerCamelCase, tokenizer_r.batch_encode_plus, lowerCamelCase, max_length=lowerCamelCase, padding='''max_length''', )
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token='''<pad>''' )
# Simple input
lowercase__ = '''This is a simple input'''
lowercase__ = ['''This is a simple input looooooooong''', '''This is a simple input''']
lowercase__ = ('''This is a simple input''', '''This is a pair''')
lowercase__ = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
lowercase__ = tokenizer.pad_token_id
lowercase__ = tokenizer(lowerCamelCase, padding='''max_length''', max_length=30, return_tensors='''np''' )
lowercase__ = tokenizer(lowerCamelCase, padding=lowerCamelCase, truncate=lowerCamelCase, return_tensors='''np''' )
lowercase__ = tokenizer(*lowerCamelCase, padding='''max_length''', max_length=60, return_tensors='''np''' )
lowercase__ = tokenizer(lowerCamelCase, padding=lowerCamelCase, truncate=lowerCamelCase, return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1], 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1], 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1], 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1], 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = '''$$$'''
lowercase__ = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=lowerCamelCase, add_bos_token=lowerCamelCase )
lowercase__ = '''This is a simple input'''
lowercase__ = ['''This is a simple input 1''', '''This is a simple input 2''']
lowercase__ = tokenizer.bos_token_id
lowercase__ = tokenizer(lowerCamelCase )
lowercase__ = tokenizer(lowerCamelCase )
self.assertEqual(out_s.input_ids[0], lowerCamelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowercase__ = tokenizer.decode(out_s.input_ids )
lowercase__ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0], lowerCamelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
lowercase__ = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
lowercase__ = '''\nif len_a > len_b: result = a\nelse: result = b'''
lowercase__ = tokenizer.encode(lowerCamelCase )
lowercase__ = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
lowercase__ = tokenizer.decode(lowerCamelCase, truncate_before_pattern=lowerCamelCase )
self.assertEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : str ):
'''simple docstring'''
pass
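# Illustrative sketch (requires Hub access; behaviour, not an exact assertion):
# decode() accepts `truncate_before_pattern` and cuts the text at the first
# match of any given regex, as exercised in the slow test above.
#
#     tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#     ids = tok.encode("result = a\n\n\n# trailing comment")
#     tok.decode(ids, truncate_before_pattern=["^#", "\n\n\n"])  # drops the tail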
| 671 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = DistilBertTokenizer
lowercase__ = DistilBertTokenizerFast
lowercase__ = True
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
lowercase__ = tokenizer.encode('''sequence builders''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''multi-sequence build''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
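# The layout asserted above is the standard BERT-style encoding, e.g. for
# token-id sequences A and B:
#     single: [CLS] A [SEP]
#     pair:   [CLS] A [SEP] B [SEP]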
| 671 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
# setable values
lowercase__ = 42
lowercase__ = 42
lowercase__ = None
@classmethod
def lowercase__ ( cls : Tuple, lowerCamelCase : CommonSchedulerState, lowerCamelCase : jnp.ndarray, lowerCamelCase : jnp.ndarray ):
'''simple docstring'''
return cls(common=lowerCamelCase, init_noise_sigma=lowerCamelCase, timesteps=lowerCamelCase )
@dataclass
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = 42
class _UpperCAmelCase ( A__ ,A__ ):
"""simple docstring"""
lowercase__ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowercase__ = 42
@property
def lowercase__ ( self : str ):
'''simple docstring'''
return True
@register_to_config
def __init__( self : Any, lowerCamelCase : int = 1_000, lowerCamelCase : float = 0.0001, lowerCamelCase : float = 0.02, lowerCamelCase : str = "linear", lowerCamelCase : Optional[jnp.ndarray] = None, lowerCamelCase : str = "fixed_small", lowerCamelCase : bool = True, lowerCamelCase : str = "epsilon", lowerCamelCase : jnp.dtype = jnp.floataa, ):
'''simple docstring'''
lowercase__ = dtype
def lowercase__ ( self : List[str], lowerCamelCase : Optional[CommonSchedulerState] = None ):
'''simple docstring'''
if common is None:
lowercase__ = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase__ = jnp.array(1.0, dtype=self.dtype )
lowercase__ = jnp.arange(0, self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=lowerCamelCase, init_noise_sigma=lowerCamelCase, timesteps=lowerCamelCase, )
def lowercase__ ( self : List[str], lowerCamelCase : DDPMSchedulerState, lowerCamelCase : jnp.ndarray, lowerCamelCase : Optional[int] = None ):
'''simple docstring'''
return sample
def lowercase__ ( self : Optional[Any], lowerCamelCase : DDPMSchedulerState, lowerCamelCase : int, lowerCamelCase : Tuple = () ):
'''simple docstring'''
lowercase__ = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
lowercase__ = (jnp.arange(0, lowerCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=lowerCamelCase, timesteps=lowerCamelCase, )
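    # Worked example for the schedule above: with num_train_timesteps=1000 and
    # num_inference_steps=10, step_ratio is 100 and the resulting timesteps are
    # [900, 800, ..., 100, 0].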
def lowercase__ ( self : Optional[int], lowerCamelCase : DDPMSchedulerState, lowerCamelCase : int, lowerCamelCase : Any=None, lowerCamelCase : Optional[int]=None ):
'''simple docstring'''
lowercase__ = state.common.alphas_cumprod[t]
lowercase__ = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype ) )
        # For t > 0, compute the predicted variance βt (see formulas (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase__ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase__ = jnp.clip(lowerCamelCase, a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase__ = jnp.log(jnp.clip(lowerCamelCase, a_min=1E-20 ) )
elif variance_type == "fixed_large":
lowercase__ = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase__ = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase__ = variance
lowercase__ = state.common.betas[t]
lowercase__ = (predicted_variance + 1) / 2
lowercase__ = frac * max_log + (1 - frac) * min_log
return variance
def lowercase__ ( self : Any, lowerCamelCase : DDPMSchedulerState, lowerCamelCase : jnp.ndarray, lowerCamelCase : int, lowerCamelCase : jnp.ndarray, lowerCamelCase : Optional[jax.random.KeyArray] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
lowercase__ = timestep
if key is None:
lowercase__ = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ = jnp.split(lowerCamelCase, sample.shape[1], axis=1 )
else:
lowercase__ = None
# 1. compute alphas, betas
lowercase__ = state.common.alphas_cumprod[t]
lowercase__ = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype ) )
lowercase__ = 1 - alpha_prod_t
lowercase__ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ = model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                '''or `v_prediction` for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ = jnp.clip(lowerCamelCase, -1, 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase__ = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase__ = jax.random.split(lowerCamelCase, num=1 )
lowercase__ = jax.random.normal(lowerCamelCase, shape=model_output.shape, dtype=self.dtype )
return (self._get_variance(lowerCamelCase, lowerCamelCase, predicted_variance=lowerCamelCase ) ** 0.5) * noise
lowercase__ = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype ) )
lowercase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=lowerCamelCase, state=lowerCamelCase )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : DDPMSchedulerState, lowerCamelCase : jnp.ndarray, lowerCamelCase : jnp.ndarray, lowerCamelCase : jnp.ndarray, ):
'''simple docstring'''
return add_noise_common(state.common, lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Optional[int], lowerCamelCase : DDPMSchedulerState, lowerCamelCase : jnp.ndarray, lowerCamelCase : jnp.ndarray, lowerCamelCase : jnp.ndarray, ):
'''simple docstring'''
return get_velocity_common(state.common, lowerCamelCase, lowerCamelCase, lowerCamelCase )
def __len__( self : Dict ):
'''simple docstring'''
return self.config.num_train_timesteps
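# Minimal denoising-loop sketch using the upstream FlaxDDPMScheduler API that
# this class mirrors; `unet` and `params` are hypothetical stand-ins:
#
#     scheduler = FlaxDDPMScheduler(num_train_timesteps=1_000)
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=50)
#     for t in state.timesteps:
#         noise_pred = unet.apply(params, sample, t)
#         sample, state = scheduler.step(state, noise_pred, t, sample, return_dict=False)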
| 671 |
from __future__ import annotations
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
if resistor <= 0:
lowercase__ = F"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(lowerCamelCase_ )
first_sum += 1 / float(lowerCamelCase_ )
index += 1
return 1 / first_sum
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase__ = F"""Resistor at index {index} has a negative value!"""
raise ValueError(lowerCamelCase_ )
index += 1
return sum_r
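# Worked examples for the two helpers above (parallel first, series second):
#     three 3 Ω resistors in parallel: 1 / (1/3 + 1/3 + 1/3) = 1.0 Ω
#     the same three in series:        3 + 3 + 3            = 9.0 Ω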
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 1 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = BertJapaneseTokenizer
lowercase__ = False
lowercase__ = True
def lowercase__ ( self : str ):
'''simple docstring'''
super().setUp()
lowercase__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = '''こんにちは、世界。 \nこんばんは、世界。'''
lowercase__ = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def lowercase__ ( self : Optional[int], lowerCamelCase : List[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.get_input_output_texts(lowerCamelCase )
lowercase__ = tokenizer.encode(lowerCamelCase, add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.decode(lowerCamelCase, clean_up_tokenization_spaces=lowerCamelCase )
return text, ids
def lowercase__ ( self : Any ):
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.tokenizer_class(self.vocab_file )
lowercase__ = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(lowerCamelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.tokenizer_class(self.vocab_file, word_tokenizer_type='''mecab''' )
self.assertIsNotNone(lowerCamelCase )
lowercase__ = '''こんにちは、世界。\nこんばんは、世界。'''
lowercase__ = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase__ = os.path.join(self.tmpdirname, '''tokenizer.bin''' )
with open(lowerCamelCase, '''wb''' ) as handle:
pickle.dump(lowerCamelCase, lowerCamelCase )
with open(lowerCamelCase, '''rb''' ) as handle:
lowercase__ = pickle.load(lowerCamelCase )
lowercase__ = tokenizer_new.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
try:
lowercase__ = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
try:
lowercase__ = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MecabTokenizer(do_lower_case=lowerCamelCase, mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
def lowercase__ ( self : Any ):
'''simple docstring'''
try:
lowercase__ = MecabTokenizer(
do_lower_case=lowerCamelCase, normalize_text=lowerCamelCase, mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
            # if the dictionary doesn't exist on the system, the constructor above raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = MecabTokenizer(normalize_text=lowerCamelCase, mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''], )
@require_sudachi
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.tokenizer_class(self.vocab_file, word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(lowerCamelCase )
lowercase__ = '''こんにちは、世界。\nこんばんは、世界。'''
lowercase__ = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase__ = os.path.join(self.tmpdirname, '''tokenizer.bin''' )
with open(lowerCamelCase, '''wb''' ) as handle:
pickle.dump(lowerCamelCase, lowerCamelCase )
with open(lowerCamelCase, '''rb''' ) as handle:
lowercase__ = pickle.load(lowerCamelCase )
lowercase__ = tokenizer_new.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
@require_sudachi
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''], )
@require_sudachi
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(sudachi_dict_type='''core''', sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ), ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(sudachi_dict_type='''core''', sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ), ['''外国人''', '''参政権'''] )
@require_sudachi
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(sudachi_dict_type='''core''', sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ), ['''外国人参政権'''] )
@require_sudachi
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(do_lower_case=lowerCamelCase, sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''], )
@require_sudachi
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(normalize_text=lowerCamelCase, sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''], )
@require_sudachi
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(trim_whitespace=lowerCamelCase, sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
@require_jumanpp
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.tokenizer_class(self.vocab_file, word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(lowerCamelCase )
lowercase__ = '''こんにちは、世界。\nこんばんは、世界。'''
lowercase__ = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase__ = os.path.join(self.tmpdirname, '''tokenizer.bin''' )
with open(lowerCamelCase, '''wb''' ) as handle:
pickle.dump(lowerCamelCase, lowerCamelCase )
with open(lowerCamelCase, '''rb''' ) as handle:
lowercase__ = pickle.load(lowerCamelCase )
lowercase__ = tokenizer_new.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
@require_jumanpp
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], )
@require_jumanpp
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = JumanppTokenizer(do_lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], )
@require_jumanpp
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = JumanppTokenizer(normalize_text=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], )
@require_jumanpp
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = JumanppTokenizer(trim_whitespace=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''], )
@require_jumanpp
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ), ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''], )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
lowercase__ = {}
for i, token in enumerate(lowerCamelCase ):
lowercase__ = i
lowercase__ = WordpieceTokenizer(vocab=lowerCamelCase, unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ), [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ), ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ), ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ), ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
lowercase__ = tokenizer.subword_tokenizer
lowercase__ = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(lowerCamelCase, ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
lowercase__ = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(lowerCamelCase, ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
lowercase__ = tokenizer.encode('''ありがとう。''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''どういたしまして。''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
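# Illustrative sketch (assumes the MeCab backend and Hub access): the word-level
# tokenizer is selected at load time and WordPiece runs on top of it, mirroring
# the subtests above.
#
#     tok = BertJapaneseTokenizer.from_pretrained(
#         "cl-tohoku/bert-base-japanese", word_tokenizer_type="mecab"
#     )
#     tok.tokenize("こんにちは、世界。")  # -> ["こんにちは", "、", "世界", "。"]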
@custom_tokenizers
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = BertJapaneseTokenizer
lowercase__ = False
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
lowercase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : str, **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type='''character''', **lowerCamelCase )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = '''こんにちは、世界。 \nこんばんは、世界。'''
lowercase__ = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def lowercase__ ( self : Any ):
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : int ):
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : Dict ):
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.tokenizer_class(self.vocab_file, subword_tokenizer_type='''character''' )
lowercase__ = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
lowerCamelCase, ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowercase__ = {}
for i, token in enumerate(lowerCamelCase ):
lowercase__ = i
lowercase__ = CharacterTokenizer(vocab=lowerCamelCase, unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ), [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ), ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ), ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
lowercase__ = tokenizer.encode('''ありがとう。''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''どういたしまして。''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''cl-tohoku/bert-base-japanese'''
lowercase__ = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase, lowerCamelCase )
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''', level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(lowerCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
lowercase__ = '''bert-base-cased'''
with self.assertLogs('''transformers''', level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(lowerCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
| 671 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
lowercase__ = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('''RGB''' )
lowercase__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
lowercase__ = transform(lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ )
return image
def a ( lowerCamelCase_ ):
'''simple docstring'''
if "visual_encoder" in key:
        lowercase__ = re.sub(r'''visual_encoder''' , '''vision_model.encoder''' , lowerCamelCase_ )
if "blocks" in key:
lowercase__ = re.sub(r'''blocks''' , '''layers''' , lowerCamelCase_ )
if "attn" in key:
lowercase__ = re.sub(r'''attn''' , '''self_attn''' , lowerCamelCase_ )
if "norm1" in key:
lowercase__ = re.sub(r'''norm1''' , '''layer_norm1''' , lowerCamelCase_ )
if "norm2" in key:
lowercase__ = re.sub(r'''norm2''' , '''layer_norm2''' , lowerCamelCase_ )
if "encoder.norm" in key:
lowercase__ = re.sub(r'''encoder.norm''' , '''post_layernorm''' , lowerCamelCase_ )
if "encoder.patch_embed.proj" in key:
lowercase__ = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , lowerCamelCase_ )
if "encoder.pos_embed" in key:
lowercase__ = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , lowerCamelCase_ )
if "encoder.cls_token" in key:
lowercase__ = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , lowerCamelCase_ )
if "self_attn" in key:
lowercase__ = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , lowerCamelCase_ )
return key
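# Example of the renaming above on a hypothetical BLIP key:
#     visual_encoder.blocks.0.attn.proj.weight
#         -> vision_model.encoder.layers.0.self_attn.projection.weight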
@torch.no_grad()
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
if config_path is not None:
lowercase__ = BlipConfig.from_pretrained(lowerCamelCase_ )
else:
lowercase__ = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
lowercase__ = BlipForConditionalGeneration(lowerCamelCase_ ).eval()
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
lowercase__ = blip_decoder(pretrained=lowerCamelCase_ , image_size=384 , vit='''base''' )
lowercase__ = pt_model.eval()
lowercase__ = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
hf_model.load_state_dict(lowerCamelCase_ )
lowercase__ = 384
lowercase__ = load_demo_image(image_size=lowerCamelCase_ , device='''cpu''' )
lowercase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase__ = tokenizer(['''a picture of'''] ).input_ids
lowercase__ = hf_model.generate(lowerCamelCase_ , lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowercase__ = hf_model.generate(lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCamelCase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase__ = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
lowercase__ = blip_vqa(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
vqa_model.eval()
lowercase__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForQuestionAnswering(lowerCamelCase_ )
hf_vqa_model.load_state_dict(lowerCamelCase_ )
lowercase__ = ['''How many dogs are in this image?''']
lowercase__ = tokenizer(lowerCamelCase_ , return_tensors='''pt''' ).input_ids
lowercase__ = hf_vqa_model.generate(lowerCamelCase_ , lowerCamelCase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
lowercase__ = blip_itm(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
itm_model.eval()
lowercase__ = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForImageTextRetrieval(lowerCamelCase_ )
lowercase__ = ['''A picture of a woman with a dog sitting in a beach''']
lowercase__ = tokenizer(
lowerCamelCase_ , return_tensors='''pt''' , padding='''max_length''' , truncation=lowerCamelCase_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(lowerCamelCase_ )
hf_itm_model.eval()
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
A__ : List[Any] = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
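# Example invocation (script name follows the upstream convention, shown for
# illustration only):
#     python convert_blip_original_pytorch_to_hf.py \
#         --pytorch_dump_folder_path ./blip-converted \
#         --config_path ./blip_config.json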
| 671 | 1 |
import heapq
import sys
import numpy as np
A__ : Tuple = tuple[int, int]
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = []
lowercase__ = set()
def lowercase__ ( self : List[str] ):
'''simple docstring'''
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
return len(self.elements ) == 0
def lowercase__ ( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if item not in self.set:
heapq.heappush(self.elements, (priority, item) )
self.set.add(lowerCamelCase )
else:
# update
# print("update", item)
lowercase__ = []
((lowercase__) , (lowercase__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((lowercase__) , (lowercase__)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements, (pro, xxx) )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[Any] ):
'''simple docstring'''
if item in self.set:
self.set.remove(lowerCamelCase )
lowercase__ = []
((lowercase__) , (lowercase__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((lowercase__) , (lowercase__)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements, (prito, yyy) )
def lowercase__ ( self : Any ):
'''simple docstring'''
return self.elements[0][1]
def lowercase__ ( self : Any ):
'''simple docstring'''
((lowercase__) , (lowercase__)) = heapq.heappop(self.elements )
self.set.remove(lowerCamelCase )
return (priority, item)
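# Behavioural sketch of the queue above: `put` pushes a new entry or
# re-prioritises an existing one; `get` pops the minimum-priority entry.
#
#     q = PriorityQueue()
#     q.put((0, 0), 5)
#     q.put((1, 1), 3)
#     q.put((0, 0), 1)  # update: (0, 0) now outranks (1, 1)
#     q.get()           # -> (1, (0, 0))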
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
# euclidean distance
lowercase__ = np.array(lowerCamelCase_ )
lowercase__ = np.array(lowerCamelCase_ )
return np.linalg.norm(a - b )
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
    # inadmissible variant: the Euclidean distance integer-divided by the global time counter t
return consistent_heuristic(lowerCamelCase_ , lowerCamelCase_ ) // t
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = g_function[start] + Wa * heuristics[i](lowerCamelCase_ , lowerCamelCase_ )
return ans
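# The priority computed above is the weighted A* key f_i(s) = g(s) + W1 * h_i(s, goal).
# Queue 0 (the anchor) uses the consistent heuristic; the other queues use
# inadmissible heuristics and are only expanded while their key stays within a
# factor W2 of the anchor key (see multi_a_star below).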
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = np.chararray((n, n) )
for i in range(lowerCamelCase_ ):
for j in range(lowerCamelCase_ ):
lowercase__ = '''*'''
for i in range(lowerCamelCase_ ):
for j in range(lowerCamelCase_ ):
if (j, (n - 1) - i) in blocks:
lowercase__ = '''#'''
lowercase__ = '''-'''
lowercase__ = back_pointer[goal]
while x != start:
((lowercase__) , (lowercase__)) = x
# print(x)
lowercase__ = '''-'''
lowercase__ = back_pointer[x]
lowercase__ = '''-'''
for i in range(lowerCamelCase_ ):
for j in range(lowerCamelCase_ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
lowercase__ = back_pointer[goal]
while x != start:
print(lowerCamelCase_ , end=''' ''' )
lowercase__ = back_pointer[x]
print(lowerCamelCase_ )
sys.exit()
def a ( lowerCamelCase_ ):
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ):
'''simple docstring'''
for itera in range(lowerCamelCase_ ):
open_list[itera].remove_element(lowerCamelCase_ )
# print("s", s)
# print("j", j)
((lowercase__) , (lowercase__)) = s
lowercase__ = (x - 1, y)
lowercase__ = (x + 1, y)
lowercase__ = (x, y + 1)
lowercase__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowerCamelCase_ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowerCamelCase_ )
lowercase__ = -1
lowercase__ = float('''inf''' )
if valid(lowerCamelCase_ ) and g_function[neighbours] > g_function[s] + 1:
lowercase__ = g_function[s] + 1
lowercase__ = s
if neighbours not in close_list_anchor:
open_list[0].put(lowerCamelCase_ , key(lowerCamelCase_ , 0 , lowerCamelCase_ , lowerCamelCase_ ) )
if neighbours not in close_list_inad:
for var in range(1 , lowerCamelCase_ ):
if key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) <= Wa * key(
lowerCamelCase_ , 0 , lowerCamelCase_ , lowerCamelCase_ ):
open_list[j].put(
lowerCamelCase_ , key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) )
def a ( ):
'''simple docstring'''
lowercase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
A__ : Dict = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
A__ : Optional[int] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
A__ : Optional[Any] = make_common_ground()
A__ : Optional[Any] = blocks_blk
# hyper parameters
A__ : Tuple = 1
A__ : Optional[Any] = 1
A__ : List[Any] = 20
A__ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
A__ : int = (0, 0)
A__ : Tuple = (n - 1, n - 1)
A__ : str = 1
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = {start: 0, goal: float('''inf''' )}
lowercase__ = {start: -1, goal: -1}
lowercase__ = []
lowercase__ = set()
for i in range(lowerCamelCase_ ):
open_list.append(PriorityQueue() )
open_list[i].put(lowerCamelCase_ , key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = []
lowercase__ = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , lowerCamelCase_ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
lowercase__ , lowercase__ = open_list[i].top_show()
visited.add(lowerCamelCase_ )
expand_state(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
close_list_inad.append(lowerCamelCase_ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
lowercase__ = open_list[0].top_show()
visited.add(lowerCamelCase_ )
expand_state(
lowerCamelCase_ , 0 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
close_list_anchor.append(lowerCamelCase_ )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCamelCase_ ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
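    # With anchor weight W1 and suboptimality factor W2 (both 1 in this module),
    # shared Multi-Heuristic A* returns a path costing at most W1 * W2 times the
    # optimum (Aine et al., "Multi-Heuristic A*", IJRR 2016).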
| 671 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Any, lowerCamelCase : Tuple=7, lowerCamelCase : str=3, lowerCamelCase : Tuple=18, lowerCamelCase : int=30, lowerCamelCase : Tuple=400, lowerCamelCase : Any=True, lowerCamelCase : Any=None, lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=None, ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''shortest_edge''': 20}
lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
def lowercase__ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''crop_size''' ) )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Any ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
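# Minimal usage sketch (illustrative, not part of the original file): mirrors the
# shape checks above for a single PIL image, using the class name as imported above
# and the test's default size/crop configuration.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import MobileNetVaImageProcessor

    processor = MobileNetVaImageProcessor(
        size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18}
    )
    image = Image.fromarray(np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8))
    pixel_values = processor(image, return_tensors="pt").pixel_values
    assert pixel_values.shape == (1, 3, 18, 18)  # (batch, channels, crop height, crop width)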
| 671 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
A__ : Optional[Any] = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
A__ : Optional[int] = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
A__ : int = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?",\n        ...                    "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?",\n        ...                    "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, homepage='''http://www.cs.umd.edu/~snover/tercom/''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': datasets.Value('''string''', id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''', id='''sequence''' ), id='''references''' ),
} ), codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''], reference_urls=[
'''https://github.com/jhclark/tercom''',
], )
def lowercase__ ( self : Optional[int], lowerCamelCase : Optional[Any], lowerCamelCase : Optional[int], lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : bool = False, ):
'''simple docstring'''
lowercase__ = len(references[0] )
if any(len(lowerCamelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
lowercase__ = [[refs[i] for refs in references] for i in range(lowerCamelCase )]
lowercase__ = TER(
normalized=lowerCamelCase, no_punct=lowerCamelCase, asian_support=lowerCamelCase, case_sensitive=lowerCamelCase, )
lowercase__ = sb_ter.corpus_score(lowerCamelCase, lowerCamelCase )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 671 |
import argparse
import os
import re
A__ : Optional[int] = 'src/transformers'
# Pattern that looks at the indentation in a line.
A__ : Union[str, Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
A__ : List[str] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A__ : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
A__ : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A__ : Tuple = re.compile(r'\[([^\]]+)\]')
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _re_indent.search(lowerCamelCase_ )
return "" if search is None else search.groups()[0]
def a ( lowerCamelCase_ , lowerCamelCase_="" , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase_ ):
index += 1
lowercase__ = ['''\n'''.join(lines[:index] )]
else:
lowercase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ = [lines[index]]
index += 1
while index < len(lowerCamelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCamelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCamelCase_ ) )
if index < len(lowerCamelCase_ ) - 1:
lowercase__ = [lines[index + 1]]
index += 1
else:
lowercase__ = []
else:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
lowercase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase_ ) > 0:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase_ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def a ( lowerCamelCase_ ):
'''simple docstring'''
def _inner(lowerCamelCase_ ):
return key(lowerCamelCase_ ).lower().replace('''_''' , '''''' )
return _inner
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
def noop(lowerCamelCase_ ):
return x
if key is None:
lowercase__ = noop
# Constants are all uppercase, they go first.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ )[0].isupper() and not key(lowerCamelCase_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase__ = [obj for obj in objects if not key(lowerCamelCase_ )[0].isupper()]
lowercase__ = ignore_underscore(lowerCamelCase_ )
return sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
# This inner function sort imports between [ ].
def _replace(lowerCamelCase_ ):
lowercase__ = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] ) + "]"
lowercase__ = import_statement.split('''\n''' )
if len(lowerCamelCase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase__ = 2 if lines[1].strip() == '''[''' else 1
lowercase__ = [(i, _re_strip_line.search(lowerCamelCase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowercase__ = sort_objects(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )
lowercase__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCamelCase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase__ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
lowercase__ = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] )
return "\n".join(lowerCamelCase_ )
else:
# Finally we have to deal with imports fitting on one line
lowercase__ = _re_bracket_content.sub(_replace , lowerCamelCase_ )
return import_statement
def a ( lowerCamelCase_ , lowerCamelCase_=True ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase__ = split_code_in_indented_blocks(
lowerCamelCase_ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCamelCase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowercase__ = main_blocks[block_idx]
lowercase__ = block.split('''\n''' )
# Get to the start of the imports.
lowercase__ = 0
while line_idx < len(lowerCamelCase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowercase__ = len(lowerCamelCase_ )
else:
line_idx += 1
if line_idx >= len(lowerCamelCase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowercase__ = '''\n'''.join(block_lines[line_idx:-1] )
lowercase__ = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
lowercase__ = split_code_in_indented_blocks(lowerCamelCase_ , indent_level=lowerCamelCase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase__ = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowercase__ = [(pattern.search(lowerCamelCase_ ).groups()[0] if pattern.search(lowerCamelCase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase__ = [(i, key) for i, key in enumerate(lowerCamelCase_ ) if key is not None]
lowercase__ = [x[0] for x in sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowercase__ = 0
lowercase__ = []
for i in range(len(lowerCamelCase_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowercase__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCamelCase_ )
count += 1
# And we put our main block back together with its first and last line.
lowercase__ = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCamelCase_ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCamelCase_ ) )
def a ( lowerCamelCase_=True ):
'''simple docstring'''
lowercase__ = []
for root, _, files in os.walk(lowerCamelCase_ ):
if "__init__.py" in files:
lowercase__ = sort_imports(os.path.join(lowerCamelCase_ , '''__init__.py''' ) , check_only=lowerCamelCase_ )
if result:
lowercase__ = [os.path.join(lowerCamelCase_ , '''__init__.py''' )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(F"""Would overwrite {len(lowerCamelCase_ )} files, run `make style`.""" )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
A__ : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
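# Illustrative, standalone sketch (not part of the original file): the ordering
# rule implemented above puts uppercase constants first, CamelCase classes second
# and lowercase functions last, sorting each group case-insensitively with
# underscores ignored.
def _sort_objects_sketch(objects):
    _key = lambda s: s.lower().replace("_", "")
    constants = sorted((o for o in objects if o.isupper()), key=_key)
    classes = sorted((o for o in objects if o[0].isupper() and not o.isupper()), key=_key)
    functions = sorted((o for o in objects if not o[0].isupper()), key=_key)
    return constants + classes + functions

# _sort_objects_sketch(["load_tool", "BertModel", "BERT_PRETRAINED_MODEL_ARCHIVE_LIST"])
# -> ["BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "BertModel", "load_tool"]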
| 671 | 1 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = CustomTokenizer
pass
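# Design note (illustrative, an assumption about the pattern this mirrors): the
# class attribute above registers `CustomTokenizer` as the matching slow
# tokenizer (`slow_tokenizer_class`), so save/load round-trips can fall back
# between the fast and slow implementations.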
| 671 |
from math import sqrt
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
lowercase__ = True
# 0 and 1 are none primes.
if number <= 1:
lowercase__ = False
for divisor in range(2 , int(round(sqrt(lowerCamelCase_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowercase__ = False
break
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'status' must been from type bool"
return status
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowercase__ = list(range(2 , n + 1 ) )
lowercase__ = [] # this list will be returned.
# actual sieve of Eratosthenes
for i in range(len(lowerCamelCase_ ) ):
for j in range(i + 1 , len(lowerCamelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase__ = 0
# filters actual prime numbers.
lowercase__ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
lowercase__ = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCamelCase_ ):
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and number >= 0, "'number' must been an int and >= 0"
lowercase__ = [] # this list will be returned by the function.
# potential prime number factors.
lowercase__ = 2
lowercase__ = number
if number == 0 or number == 1:
ans.append(lowerCamelCase_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCamelCase_ ):
while quotient != 1:
if is_prime(lowerCamelCase_ ) and (quotient % factor == 0):
ans.append(lowerCamelCase_ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = max(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = min(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 == 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 != 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (number > 2) and is_even(lowerCamelCase_ )
), "'number' must been an int, even and > 2"
lowercase__ = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
lowercase__ = get_prime_numbers(lowerCamelCase_ )
lowercase__ = len(lowerCamelCase_ )
# run variable for while-loops.
lowercase__ = 0
lowercase__ = None
# exit variable. for break up the loops
lowercase__ = True
while i < len_pn and loop:
lowercase__ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase__ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (len(lowerCamelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 0
while numbera != 0:
lowercase__ = numbera % numbera
lowercase__ = numbera
lowercase__ = rest
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = prime_factorization(lowerCamelCase_ )
elif numbera == 1 or numbera == 1:
lowercase__ = []
lowercase__ = []
lowercase__ = max(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = 0
lowercase__ = 0
lowercase__ = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(max(lowerCamelCase_ , lowerCamelCase_ ) ):
ans *= n
else:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'number' must been a positive int"
lowercase__ = 0
lowercase__ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCamelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and is_prime(
lowerCamelCase_ ), "'ans' must been a prime number and from type int"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
is_prime(lowerCamelCase_ ) and is_prime(lowerCamelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase__ = p_number_a + 1 # jump to the next number
lowercase__ = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCamelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 1), "'n' must been int and >= 1"
lowercase__ = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase_ )
# precondition
assert ans[0] == 1 and ans[len(lowerCamelCase_ ) - 1] == n, "Error in function getDivisors(...)"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase__ = get_divisors(lowerCamelCase_ )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase__ = gcd(abs(lowerCamelCase_ ) , abs(lowerCamelCase_ ) )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
lowercase__ = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
lowercase__ = 0
lowercase__ = 1
lowercase__ = 1 # this will be return
for _ in range(n - 1 ):
lowercase__ = ans
ans += fiba
lowercase__ = tmp
return ans
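# Quick sanity checks (illustrative, not part of the original file; assumes the
# helpers keep their original names such as is_prime, sieve_er, prime_factorization,
# gcd, kg_v and fib).
if __name__ == "__main__":
    assert is_prime(97) and not is_prime(100)
    assert sieve_er(10) == [2, 3, 5, 7]
    assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]
    assert gcd(54, 24) == 6
    assert kg_v(4, 6) == 12
    assert fib(10) == 55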
| 671 | 1 |
import argparse
import os
import re
import packaging.version
A__ : Dict = 'examples/'
A__ : int = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
A__ : List[Any] = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
A__ : Optional[int] = 'README.md'
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
with open(lowerCamelCase_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase__ = f.read()
lowercase__ , lowercase__ = REPLACE_PATTERNS[pattern]
lowercase__ = replace.replace('''VERSION''' , lowerCamelCase_ )
lowercase__ = re_pattern.sub(lowerCamelCase_ , lowerCamelCase_ )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
for folder, directories, fnames in os.walk(lowerCamelCase_ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ , pattern='''examples''' )
def a ( lowerCamelCase_ , lowerCamelCase_=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if not patch:
update_version_in_examples(lowerCamelCase_ )
def a ( ):
'''simple docstring'''
lowercase__ = '''🤗 Transformers currently provides the following architectures'''
lowercase__ = '''1. Want to contribute a new model?'''
with open(lowerCamelCase_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase__ = f.readlines()
# Find the start of the list.
lowercase__ = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowercase__ = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
lowercase__ = lines[index].replace(
'''https://huggingface.co/docs/diffusers/main/model_doc''' , '''https://huggingface.co/docs/diffusers/model_doc''' , )
index += 1
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lowerCamelCase_ )
def a ( ):
'''simple docstring'''
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
lowercase__ = f.read()
lowercase__ = REPLACE_PATTERNS['''init'''][0].search(lowerCamelCase_ ).groups()[0]
return packaging.version.parse(lowerCamelCase_ )
def a ( lowerCamelCase_=False ):
'''simple docstring'''
lowercase__ = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
lowercase__ = default_version.base_version
elif patch:
lowercase__ = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
lowercase__ = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
lowercase__ = input(F"""Which version are you releasing? [{default_version}]""" )
if len(lowerCamelCase_ ) == 0:
lowercase__ = default_version
print(F"""Updating version to {version}.""" )
global_version_update(lowerCamelCase_ , patch=lowerCamelCase_ )
def a ( ):
'''simple docstring'''
lowercase__ = get_version()
lowercase__ = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
lowercase__ = current_version.base_version
# Check with the user we got that right.
lowercase__ = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(lowerCamelCase_ ) == 0:
lowercase__ = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(lowerCamelCase_ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
A__ : List[str] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
A__ : str = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
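# Worked example (standalone sketch, not part of the original file): the 'init'
# pattern defined above rewrites the version assignment in src/diffusers/__init__.py.
if __name__ == "__main__":
    import re as _re
    _init_re = _re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', _re.MULTILINE)
    assert _init_re.sub('__version__ = "0.19.0"', '__version__ = "0.19.0.dev0"') == '__version__ = "0.19.0"'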
| 671 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = args.log_outputs
lowercase__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
lowercase__ = load_metric('''wer''' )
lowercase__ = load_metric('''cer''' )
# compute metrics
lowercase__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
lowercase__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
lowercase__ = F"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCamelCase_ )
with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowerCamelCase_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowercase__ = F"""log_{dataset_id}_predictions.txt"""
lowercase__ = F"""log_{dataset_id}_targets.txt"""
with open(lowerCamelCase_ , '''w''' ) as p, open(lowerCamelCase_ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCamelCase_ , lowerCamelCase_ ):
p.write(F"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(F"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCamelCase_ , with_indices=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowercase__ = re.sub(lowerCamelCase_ , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing newline characters etc.
# note that order is important here!
lowercase__ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
lowercase__ = ''' '''.join(text.split(lowerCamelCase_ ) )
return text
def a ( lowerCamelCase_ ):
'''simple docstring'''
# load dataset
lowercase__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCamelCase_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
lowercase__ = AutoFeatureExtractor.from_pretrained(args.model_id )
lowercase__ = feature_extractor.sampling_rate
# resample audio
lowercase__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowerCamelCase_ ) )
# load eval pipeline
if args.device is None:
lowercase__ = 0 if torch.cuda.is_available() else -1
lowercase__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCamelCase_ ):
lowercase__ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowercase__ = prediction['''text''']
lowercase__ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
lowercase__ = dataset.map(lowerCamelCase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
A__ : Union[str, Any] = parser.parse_args()
main(args)
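# Illustrative, standalone sketch (not part of the original file): the target-text
# normalization above lowercases, strips the ignored characters and collapses
# whitespace before WER/CER are computed.
def _normalize_sketch(text: str) -> str:
    import re as _re
    text = _re.sub(r'[,?.!\-\;\:"“%‘”�—’…–]', "", text.lower())
    return " ".join(text.split())

# _normalize_sketch("Hello, World!\n") -> "hello world"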
| 671 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
A__ : int = {
'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] = [
'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXJapaneseForCausalLM',
'GPTNeoXJapaneseLayer',
'GPTNeoXJapaneseModel',
'GPTNeoXJapanesePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
A__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
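# Design note (illustrative): with the `_LazyModule` structure above, heavy
# submodules are imported only on first attribute access, so e.g.
#   from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseTokenizer
# does not pull in the torch-dependent modeling code.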
| 671 |
from functools import reduce
A__ : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def a ( lowerCamelCase_ = N ):
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCamelCase_ , lowerCamelCase_ : str(int(lowerCamelCase_ ) * int(lowerCamelCase_ ) ) , n[i : i + 13] ) )
for i in range(len(lowerCamelCase_ ) - 12 ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = BarthezTokenizer
lowercase__ = BarthezTokenizerFast
lowercase__ = True
lowercase__ = True
def lowercase__ ( self : int ):
'''simple docstring'''
super().setUp()
lowercase__ = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname, legacy_format=lowerCamelCase )
lowercase__ = tokenizer
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = '''<pad>'''
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ), lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ), lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '''<s>''' )
self.assertEqual(vocab_keys[1], '''<pad>''' )
self.assertEqual(vocab_keys[-1], '''<mask>''' )
self.assertEqual(len(lowerCamelCase ), 101_122 )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 101_122 )
@require_torch
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowercase__ = [0, 57, 3_018, 70_307, 91, 2]
lowercase__ = self.tokenizer(
lowerCamelCase, max_length=len(lowerCamelCase ), padding=lowerCamelCase, truncation=lowerCamelCase, return_tensors='''pt''' )
self.assertIsInstance(lowerCamelCase, lowerCamelCase )
self.assertEqual((2, 6), batch.input_ids.shape )
self.assertEqual((2, 6), batch.attention_mask.shape )
lowercase__ = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Any ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = tokenizer.tokenize(lowerCamelCase )
lowercase__ = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
lowercase__ = tokenizer.encode(lowerCamelCase, add_special_tokens=lowerCamelCase )
lowercase__ = rust_tokenizer.encode(lowerCamelCase, add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(lowerCamelCase )
lowercase__ = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
# fmt: off
lowercase__ = {'''input_ids''': [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
lowercase__ = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase, model_name='''moussaKam/mbarthez''', revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''', sequences=lowerCamelCase, )
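# Usage sketch (illustrative, not part of the original file): the integration
# check above corresponds to
#   tokenizer = BarthezTokenizer.from_pretrained("moussaKam/mbarthez")
#   tokenizer("A long paragraph for summarization.").input_ids
# which yields ids that start with <s> (0) and end with </s> (2).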
| 671 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase ( A__ ,A__ ):
"""simple docstring"""
lowercase__ = 1
@register_to_config
def __init__( self : Union[str, Any], lowerCamelCase : int = 2_000, lowerCamelCase : float = 0.15, lowerCamelCase : float = 0.01, lowerCamelCase : float = 1348.0, lowerCamelCase : float = 1E-5, lowerCamelCase : int = 1, ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ = sigma_max
# setable values
lowercase__ = None
self.set_sigmas(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[int] = None ):
'''simple docstring'''
return sample
def lowercase__ ( self : Dict, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : Union[str, torch.device] = None ):
'''simple docstring'''
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowercase__ = torch.linspace(1, lowerCamelCase, lowerCamelCase, device=lowerCamelCase )
def lowercase__ ( self : str, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : float = None, lowerCamelCase : float = None ):
'''simple docstring'''
lowercase__ = sigma_min if sigma_min is not None else self.config.sigma_min
lowercase__ = sigma_max if sigma_max is not None else self.config.sigma_max
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCamelCase, lowerCamelCase )
lowercase__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowercase__ = torch.exp(torch.linspace(math.log(lowerCamelCase ), math.log(lowerCamelCase ), lowerCamelCase ) )
lowercase__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : str ):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
def lowercase__ ( self : Tuple, lowerCamelCase : torch.FloatTensor, lowerCamelCase : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowercase__ = timestep * torch.ones(
sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowercase__ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowercase__ = timesteps.to(self.discrete_sigmas.device )
lowercase__ = self.discrete_sigmas[timesteps].to(sample.device )
lowercase__ = self.get_adjacent_sigma(lowerCamelCase, lowerCamelCase ).to(sample.device )
lowercase__ = torch.zeros_like(lowerCamelCase )
lowercase__ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowercase__ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowercase__ = diffusion.unsqueeze(-1 )
lowercase__ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of the SDE
lowercase__ = randn_tensor(
sample.shape, layout=sample.layout, generator=lowerCamelCase, device=sample.device, dtype=sample.dtype )
lowercase__ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowercase__ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCamelCase, prev_sample_mean=lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowercase__ = randn_tensor(sample.shape, layout=sample.layout, generator=lowerCamelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowercase__ = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowercase__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowercase__ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowercase__ = step_size.unsqueeze(-1 )
lowercase__ = sample + step_size * model_output
lowercase__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, ):
'''simple docstring'''
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowercase__ = timesteps.to(original_samples.device )
lowercase__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowercase__ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCamelCase ) * sigmas[:, None, None, None]
)
lowercase__ = noise + original_samples
return noisy_samples
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
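# Usage sketch (illustrative, not part of the original file): a score-based
# predictor-corrector sampling loop with this scheduler would alternate
# `step_correct` and `step_pred`; `model`, the public class name and the tensor
# shape below are hypothetical.
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(2000)
#   scheduler.set_sigmas(2000)
#   sample = torch.randn(1, 3, 256, 256) * scheduler.config.sigma_max
#   for t in scheduler.timesteps:
#       for _ in range(num_correction_steps):
#           score = model(sample, t).sample
#           sample = scheduler.step_correct(score, sample).prev_sample
#       score = model(sample, t).sample
#       sample = scheduler.step_pred(score, t, sample).prev_sample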
| 671 | 1 |
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the indent of `line`, as a string of spaces."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks that start at the given `indent_level`, optionally bounded by prompts."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a sort key so that case and underscores are ignored when comparing."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort `objects` isort-style: CONSTANTS first, Classes second, functions last."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return `import_statement` with the objects inside its brackets sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of a given init `file`; in check mode, return True if it would change."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under PATH_TO_TRANSFORMERS and collect failures."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
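

# --- Illustrative sketch ---
# `sort_objects` above orders names in three buckets - CONSTANTS first, Classes
# second, functions last - each bucket sorted case-insensitively with
# underscores ignored. Expected behaviour (doctest-style, hypothetical names):
#
#     >>> sort_objects(["load_tool", "Agent", "CONFIG_NAME", "pipeline", "BertModel"])
#     ['CONFIG_NAME', 'Agent', 'BertModel', 'load_tool', 'pipeline']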
| 671 |
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    """
    Project Euler 75: count the perimeters up to `limit` that are achieved by
    exactly one integer right triangle, using Euclid's formula to enumerate
    primitive Pythagorean triples.
    """
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
| 671 | 1 |
def harmonic_series(n_term: str) -> list:
    """Return the first `n_term` terms of the harmonic series as strings, e.g. ['1', '1/2', '1/3']."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
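

# --- Illustrative sketch ---
# The function above returns the terms as strings; summing the numeric values
# instead gives the n-th harmonic number H_n = 1 + 1/2 + ... + 1/n:
#
#     >>> round(sum(1 / k for k in range(1, 5)), 6)  # H_4 = 25/12
#     2.083333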
| 671 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 5_12,
'bert-large-uncased': 5_12,
'bert-base-cased': 5_12,
'bert-large-cased': 5_12,
'bert-base-multilingual-uncased': 5_12,
'bert-base-multilingual-cased': 5_12,
'bert-base-chinese': 5_12,
'bert-base-german-cased': 5_12,
'bert-large-uncased-whole-word-masking': 5_12,
'bert-large-cased-whole-word-masking': 5_12,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_12,
'bert-base-cased-finetuned-mrpc': 5_12,
'bert-base-german-dbmdz-cased': 5_12,
'bert-base-german-dbmdz-uncased': 5_12,
'TurkuNLP/bert-base-finnish-cased-v1': 5_12,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_12,
'wietsedv/bert-base-dutch-cased': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" BERT tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its saved options disagree with the requested ones.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding `[CLS]`/`[SEP]` special tokens around one or two sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type IDs: 0 for the first sequence (and its special tokens), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
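

# --- Illustrative sketch ---
# For a sentence pair, `create_token_type_ids_from_sequences` marks the
# `[CLS] A [SEP]` span with 0s and the `B [SEP]` span with 1s. With hypothetical
# token ids A = [7, 8] and B = [9]:
#
#     tokens:   [CLS]  7   8  [SEP]  9  [SEP]
#     type ids:   0    0   0    0    1    1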
| 671 | 1 |
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character has an odd count."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Explicit-loop variant of the check above."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(check_str: str = "") -> None:
    """Benchmark the two implementations (timeit reads `check_str` from module scope)."""
    print("\nFor string = ", check_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 671 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
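

# --- Illustrative note ---
# With the `_LazyModule` pattern above, importing the package is cheap: the
# heavy `modeling_unispeech` module (and its torch dependency) is only loaded
# on first attribute access, e.g. `transformers.models.unispeech.UniSpeechModel`,
# using `_import_structure` to look up which submodule provides each name.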
| 671 | 1 |
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True if `number` is prime (trial division up to sqrt(number))."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n):
    """Return all primes from 2 up to `n` via the sieve of Eratosthenes."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n):
    """Return all primes from 2 up to `n`, testing each candidate with `is_prime`."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number):
    """Return the prime factorization of `number` as a list of prime factors."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number):
    """Return the greatest prime factor of `number`."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number):
    """Return the smallest prime factor of `number`."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number):
    """Return True if `number` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"

    return number % 2 == 0


def is_odd(number):
    """Return True if `number` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"

    return number % 2 != 0


def goldbach(number):
    """Return a pair of primes summing to the even `number` (Goldbach's conjecture)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1, number2):
    """Return the greatest common divisor of two non-negative integers (Euclidean algorithm)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (number1 >= 0), "'number' must been from type int and positive"
    return number1


def kg_v(number1, number2):
    """Return the least common multiple (German: kgV) of two positive integers."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (ans >= 0), "'ans' must been from type int and positive"
    return ans


def get_prime(n):
    """Return the n-th prime number (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1, p_number_2):
    """Return all primes strictly between the primes `p_number_1` and `p_number_2`."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    """Return all divisors of `n` (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number):
    """Return True if `number` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (number > 1), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    """Return the fraction `numerator/denominator` reduced to lowest terms, as a tuple."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    """Return n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n):
    """Return the n-th Fibonacci number (fib(0) == fib(1) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
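

# --- Illustrative sketch (doctest-style, using the functions above) ---
#
#     >>> goldbach(28)            # first prime pair found that sums to 28
#     [5, 23]
#     >>> kg_v(24, 36)            # least common multiple via prime factorizations
#     72
#     >>> simplify_fraction(10, 20)
#     (1, 2)
#     >>> fib(5)                  # 1, 1, 2, 3, 5, 8 -> fib(5) == 8
#     8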
| 671 |
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
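

# --- Illustrative sketch ---
# `get_duration` comes from this benchmark's local `utils` module, which is not
# shown here. A minimal stand-in with the same observable behaviour (return the
# wall-clock seconds of the wrapped call) might look like this; the real helper
# may differ.
import functools
import time


def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # elapsed seconds, stored into `times`

    return wrapper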
| 671 | 1 |
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
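

# --- Illustrative note ---
# The `@-@`, `@,@` and `@.@` tokens checked above are WikiText-style escape
# tokens: TransfoXL's Moses-based tokenizer uses them to keep hyphens and
# number separators reversible, e.g. "5,000" <-> ["5", "@,@", "000"], which is
# why `convert_tokens_to_string` can reconstruct the original text exactly.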
| 671 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Return (common substring, remaining prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break

            x += 1

        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
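

# --- Illustrative sketch ---
# `match` splits a node prefix against a query word into (common part, rest of
# prefix, rest of word); those three pieces drive the four insert cases above:
#
#     >>> RadixNode("banana").match("bandana")
#     ('ban', 'ana', 'dana')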
| 671 | 1 |
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.tqdm` that optionally displays only on the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Show the bar only on the local main process; disable it everywhere else.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
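

# --- Illustrative usage sketch (assuming a multi-process accelerate launch) ---
# With the signature above, `main_process_only` is the first positional
# argument, so a per-batch progress bar rendered only on the local main
# process looks like:
#
#     for batch in tqdm(True, dataloader):
#         ...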
| 671 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a single random PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
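

# --- Illustrative note ---
# The batch-decode test above mirrors MGP-STR's three prediction heads: a
# character head (the 38-symbol vocabulary built in setUp), a BPE head sized
# like GPT-2's vocabulary (50257) and a WordPiece head sized like BERT's
# (30522); `batch_decode` fuses them and also returns the per-head predictions.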
| 671 | 1 |
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 671 |
def _modexpt(base: int, exponent: int, modulo_value: int ) -> int:
    '''simple docstring'''
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value )) % modulo_value
def solution(base: int = 1777, height: int = 1855, digits: int = 8 ) -> int:
    '''simple docstring'''
    result = base
    for _ in range(1, height ):
        result = _modexpt(base, result, 10**digits )
    return result
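# A minimal sanity check for the helper above. The comparison against Python's
# built-in three-argument pow is an illustrative assumption added here, not
# part of the original solution: pow(b, e, m) computes the same modular power.
if __debug__:
    for b, e, m in [(3, 7, 10), (1777, 12, 10**8)]:
        assert _modexpt(b, e, m) == pow(b, e, m)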
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : Dict = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] = ['LongformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
A__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline( DiffusionPipeline ):
    """simple docstring"""
    def __init__( self, speech_model : WhisperForConditionalGeneration, speech_processor : WhisperProcessor, vae : AutoencoderKL, text_encoder : CLIPTextModel, tokenizer : CLIPTokenizer, unet : UNet2DConditionModel, scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker : StableDiffusionSafetyChecker, feature_extractor : CLIPImageProcessor, ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, )
    def enable_attention_slicing( self, slice_size : Optional[Union[str, int]] = "auto" ):
        '''simple docstring'''
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        '''simple docstring'''
        self.enable_attention_slicing(None )
@torch.no_grad()
    def __call__( self, audio, sampling_rate=16_000, height : int = 512, width : int = 512, num_inference_steps : int = 50, guidance_scale : float = 7.5, negative_prompt : Optional[Union[str, List[str]]] = None, num_images_per_prompt : Optional[int] = 1, eta : float = 0.0, generator : Optional[torch.Generator] = None, latents : Optional[torch.FloatTensor] = None, output_type : Optional[str] = "pil", return_dict : bool = True, callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps : int = 1, **kwargs, ):
        '''simple docstring'''
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors='''pt''', sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000 )
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True )[
            0
        ]
        if isinstance(prompt, str ):
            batch_size = 1
        elif isinstance(prompt, list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                F""" {type(callback_steps )}.""" )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='''max_length''', max_length=self.tokenizer.model_max_length, return_tensors='''pt''', )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1 )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''''''] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    F"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="""
                    F""" {type(prompt )}.""" )
            elif isinstance(negative_prompt, str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    F"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"""
                    F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    ''' the batch size of `prompt`.''' )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding='''max_length''', max_length=max_length, truncation=True, return_tensors='''pt''', )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device='''cpu''', dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            latents = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['''eta'''] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings ).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents )
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0, 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None )
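# A minimal usage sketch for the pipeline above. The checkpoint names and the
# `custom_pipeline` wiring are assumptions for illustration, not part of the
# original file; real Whisper and Stable Diffusion weights must be available.
#
# from transformers import WhisperForConditionalGeneration, WhisperProcessor
#
# speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
# speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
# pipe = DiffusionPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5",
#     custom_pipeline="speech_to_image_diffusion",
#     speech_model=speech_model,
#     speech_processor=speech_processor,
# )
# image = pipe(audio=waveform, sampling_rate=16_000).images[0]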
| 671 | 1 |
def perfect_cube(n: int ) -> bool:
    '''simple docstring'''
    val = n ** (1 / 3)
    return (val * val * val) == n
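# Note: the float cube root above can suffer from rounding for some inputs.
# A sketch of a rounding-based variant; the name `perfect_cube_int` is an
# assumption added here for illustration, not part of the original file:
def perfect_cube_int(n: int) -> bool:
    # round the real cube root to the nearest integer, then verify exactly
    root = round(abs(n) ** (1 / 3))
    return root**3 == abs(n)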
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """simple docstring"""
    destination_vertex: int
    weight: int
class AdjacencyList:
    """simple docstring"""
    def __init__( self, size : int ):
        '''simple docstring'''
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self, vertex : int ) -> Iterator[Edge]:
        '''simple docstring'''
        return iter(self._graph[vertex] )
    @property
    def size( self ):
        '''simple docstring'''
        return self._size
    def add_edge( self, from_vertex : int, to_vertex : int, weight : int ):
        '''simple docstring'''
        if weight not in (0, 1):
            raise ValueError('''Edge weight must be either 0 or 1.''' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('''Vertex indexes must be in [0; size).''' )
        self._graph[from_vertex].append(Edge(to_vertex, weight ) )
    def get_shortest_path( self, start_vertex : int, finish_vertex : int ) -> int:
        '''simple docstring'''
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('''No path from start_vertex to finish_vertex.''' )
        return distances[finish_vertex]
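# A short usage sketch for the 0-1 BFS above (the concrete graph is an
# assumption added for illustration): vertex 0 reaches vertex 3 through the
# zero-weight edges 0 -> 1 -> 3, so the shortest path has total weight 0.
g = AdjacencyList(4)
g.add_edge(0, 1, 0)
g.add_edge(1, 2, 1)
g.add_edge(1, 3, 0)
assert g.get_shortest_path(0, 3) == 0
assert g.get_shortest_path(0, 2) == 1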
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : Tuple = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """vit_mae"""
    def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2_048, mask_ratio=0.75, norm_pix_loss=False, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
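# A brief usage sketch for the config class above; the overridden values are
# illustrative assumptions, not recommendations from the original file.
if __name__ == "__main__":
    config = ViTMAEConfig(mask_ratio=0.9, norm_pix_loss=True)
    print(config.mask_ratio, config.decoder_hidden_size)  # 0.9 512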
| 671 |
class SubArray:
    """simple docstring"""
    def __init__( self, arr ):
        '''simple docstring'''
        # we need a list not a string, so do something to change the type
        self.array = arr.split(''',''' )
    def solve_sub_array( self ):
        '''simple docstring'''
        rear = [int(self.array[0] )] * len(self.array )
        sum_value = [int(self.array[0] )] * len(self.array )
        for i in range(1, len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
            rear[i] = max(sum_value[i], rear[i - 1] )
        return rear[len(self.array ) - 1]
if __name__ == "__main__":
    whole_array = input('please input some numbers:')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(('the results is:', re))
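# A worked example for the DP above (the literal input string is an assumption
# added for illustration): for "1,-2,3,4,-1", the maximum-sum contiguous
# sub-array is [3, 4], so solve_sub_array() returns 7.
assert SubArray("1,-2,3,4,-1").solve_sub_array() == 7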
| 671 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : Optional[Any] = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
A__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 671 |
from itertools import count
def solution(min_block_length: int = 50 ) -> int:
    '''simple docstring'''
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        for block_length in range(min_block_length, n + 1 ):
            for block_start in range(n - block_length ):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 100_0000:
            break
    return n
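# A quick cross-check against the values stated on the Project Euler 115
# problem page (restated here for illustration): with a minimum block length
# of m = 3 the fill-count function first exceeds one million at row length
# n = 30, and with m = 10 at n = 57.
if __name__ == "__main__":
    assert solution(3) == 30
    assert solution(10) == 57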
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
import argparse
CUSTOM_JS_FILE = 'docs/source/_static/js/custom.js'
def update_custom_js(version ):
    '''simple docstring'''
    with open(CUSTOM_JS_FILE , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('''const stableVersion =''' ):
        index += 1
    lines[index] = F"""const stableVersion = \"v{version}\"\n"""
    # Then update the dictionary
    while not lines[index].startswith('''const versionMapping = {''' ):
        index += 1
    # We go until the end
    while not lines[index].startswith('''}''' ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += F"""    \"v{version}\": \"v{version}\",\n"""
    with open(CUSTOM_JS_FILE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lines )
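# A small illustration of the in-memory edit performed above (the sample lines
# are an assumption for demonstration; no file is touched here):
#
#   lines = ['const stableVersion = "v4.27.0"\n',
#            'const versionMapping = {\n',
#            '    "v4.27.0": "v4.27.0",\n',
#            '}\n']
#
# After update_custom_js-style processing with version = "4.28.0", the first
# line becomes 'const stableVersion = "v4.28.0"' and a new mapping entry
# '"v4.28.0": "v4.28.0",' is appended just before the closing brace.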
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', help='Release version.')
    args = parser.parse_args()
    update_custom_js(args.version)
| 671 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor( SequenceFeatureExtractor ):
    """simple docstring"""
    model_input_names = ["""input_features""", """is_longer"""]
    def __init__( self, feature_size : int = 64, sampling_rate : int = 48_000, hop_length : int = 480, max_length_s : int = 10, fft_window_size : int = 1_024, padding_value : float = 0.0, return_attention_mask : bool = False, frequency_min : float = 0, frequency_max : float = 14_000, top_db : int = None, truncation : str = "fusion", padding : str = "repeatpad", **kwargs, ):
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale='''htk''', )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm='''slaney''', mel_scale='''slaney''', )
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['''feature_extractor_type'''] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features( self, waveform : np.array, mel_filters : Optional[np.array] = None ):
        '''simple docstring'''
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, '''hann''' ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel='''dB''', )
        return log_mel_spectrogram.T
    def _random_mel_fusion( self, mel, total_frames, chunk_frames ):
        '''simple docstring'''
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode='''bilinear''', align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
        return mel_fusion
    def _get_input_mel( self, waveform : np.array, max_length, truncation, padding ):
        '''simple docstring'''
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0, overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform, n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform, n_repeat ) )
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney )[None, :]
        return input_mel, longer
    def __call__( self, raw_speech : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation : str = None, padding : Optional[str] = None, max_length : Optional[int] = None, sampling_rate : Optional[int] = None, return_tensors : Optional[Union[str, TensorType]] = None, **kwargs, ):
        '''simple docstring'''
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray ):
            raw_speech = np.asarray(raw_speech, dtype=np.float64 )
        elif isinstance(raw_speech, np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float64 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding )
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel ) )
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], List ):
            input_mel = [np.asarray(feature, dtype=np.float64 ) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'''input_features''': input_mel, '''is_longer''': is_longer}
        input_features = BatchFeature(input_features )
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors )
        return input_features
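# A minimal usage sketch for the feature extractor above; the 3-second random
# waveform is an illustrative assumption, not data from the original file.
if __name__ == "__main__":
    extractor = ClapFeatureExtractor()
    fake_audio = np.random.randn(3 * 48_000)  # 3 s of noise at 48 kHz
    features = extractor(fake_audio, sampling_rate=48_000, return_tensors="np")
    print(features["input_features"].shape)  # (1, 4, frames, 64) with "fusion" truncation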
| 671 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester( unittest.TestCase ):
    """simple docstring"""
    def __init__( self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        '''simple docstring'''
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest( ImageProcessingSavingTestMixin, unittest.TestCase ):
    """simple docstring"""
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor, '''image_mean''' ) )
        self.assertTrue(hasattr(image_processor, '''image_std''' ) )
        self.assertTrue(hasattr(image_processor, '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processor, '''do_resize''' ) )
        self.assertTrue(hasattr(image_processor, '''size''' ) )
    def test_batch_feature( self ):
        '''simple docstring'''
        pass
    def test_call_pil( self ):
        '''simple docstring'''
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ), )
    def test_call_numpy( self ):
        '''simple docstring'''
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ), )
    def test_call_pytorch( self ):
        '''simple docstring'''
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ), )
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """simple docstring"""
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    '''simple docstring'''
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder(root: Node | None ):
    '''simple docstring'''
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder(root: Node | None ):
    '''simple docstring'''
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder(root: Node | None ):
    '''simple docstring'''
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height(root: Node | None ):
    '''simple docstring'''
    return (max(height(root.left ), height(root.right ) ) + 1) if root else 0
def level_order(root: Node | None ):
    '''simple docstring'''
    output = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int ):
    '''simple docstring'''
    output = []
    def populate_output(root: Node | None, level: int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )
    populate_output(root , level )
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int ):
    '''simple docstring'''
    output = []
    def populate_output(root: Node | None, level: int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )
    populate_output(root , level )
    return output
def zigzag(root: Node | None ):
    '''simple docstring'''
    if root is None:
        return []
    output = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output
def main() -> None: # Main function for testing.
    '''simple docstring'''
    tree = make_tree()
    print(F"""In-order Traversal: {inorder(tree )}""" )
    print(F"""Pre-order Traversal: {preorder(tree )}""" )
    print(F"""Post-order Traversal: {postorder(tree )}""" , '''\n''' )
    print(F"""Height of Tree: {height(tree )}""" , '''\n''' )
    print('''Complete Level Order Traversal: ''' )
    print(level_order(tree ) , '''\n''' )
    print('''Level-wise order Traversal: ''' )
    for level in range(1 , height(tree ) + 1 ):
        print(F"""Level {level}:""" , get_nodes_from_left_to_right(tree , level=level ) )
    print('''\nZigZag order Traversal: ''' )
    print(zigzag(tree ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 671 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A__ : str = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
A__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest( BertTokenizationTest ):
    """simple docstring"""
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
    @slow
    def test_sequence_builders( self ):
        '''simple docstring'''
        tokenizer = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
        text = tokenizer.encode('''sequence builders''', add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''', add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
| 671 | 1 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 150_0000 ) -> int:
    '''simple docstring'''
    frequencies: defaultdict = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
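# A worked example of the Euclid parametrisation used above (the specific
# numbers are restated for illustration): m = 2, n = 1 gives the primitive
# triple a = m**2 - n**2 = 3, b = 2*m*n = 4, c = m**2 + n**2 = 5, whose
# perimeter 2*m*(m + n) = 12 matches a + b + c.
if __name__ == "__main__":
    m, n = 2, 1
    assert (m * m - n * n, 2 * m * n, m * m + n * n) == (3, 4, 5)
    assert 2 * m * (m + n) == 3 + 4 + 5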
if __name__ == "__main__":
print(F"{solution() = }")
| 671 |
from __future__ import annotations
def resistor_parallel(resistors: list[float] ) -> float:
    '''simple docstring'''
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F"""Resistor at index {index} has a negative or zero value!"""
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float] ) -> float:
    '''simple docstring'''
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F"""Resistor at index {index} has a negative value!"""
            raise ValueError(msg )
        index += 1
    return sum_r
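# A short usage sketch for the two helpers above (the resistor values are an
# illustrative assumption): two 2-ohm resistors give 1 ohm in parallel and
# 4 ohms in series.
if __name__ == "__main__":
    assert resistor_parallel([2.0, 2.0]) == 1.0
    assert resistor_series([2.0, 2.0]) == 4.0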
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A__ : Union[str, Any] = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Any = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
A__ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 671 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size , device ):
    '''simple docstring'''
    img_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
    raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('''RGB''' )
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
            transforms.ToTensor(),
            transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
        ] )
    image = transform(raw_image ).unsqueeze(0 ).to(device )
    return image
def rename_key(key ):
    '''simple docstring'''
    if "visual_encoder" in key:
        key = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , key )
    if "blocks" in key:
        key = re.sub(r'''blocks''' , '''layers''' , key )
    if "attn" in key:
        key = re.sub(r'''attn''' , '''self_attn''' , key )
    if "norm1" in key:
        key = re.sub(r'''norm1''' , '''layer_norm1''' , key )
    if "norm2" in key:
        key = re.sub(r'''norm2''' , '''layer_norm2''' , key )
    if "encoder.norm" in key:
        key = re.sub(r'''encoder.norm''' , '''post_layernorm''' , key )
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , key )
    if "encoder.pos_embed" in key:
        key = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , key )
    if "encoder.cls_token" in key:
        key = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , key )
    if "self_attn" in key:
        key = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , key )
    return key
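# For instance (an illustrative trace added here, not part of the original
# script), a Gluon-style key is rewritten step by step:
#
#   rename_key("visual_encoder.blocks.0.attn.proj.weight")
#   -> "vision_model.encoder.layers.0.self_attn.projection.weight"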
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path , config_path=None ):
    '''simple docstring'''
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path )
    else:
        config = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
    hf_model = BlipForConditionalGeneration(config ).eval()
    model_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
    pt_model = blip_decoder(pretrained=model_url , image_size=384 , vit='''base''' )
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict )
    image_size = 384
    image = load_demo_image(image_size=image_size , device='''cpu''' )
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    input_ids = tokenizer(['''a picture of'''] ).input_ids
    out = hf_model.generate(image , input_ids )
    assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image )
    assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path )
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
    )
    vqa_model = blip_vqa(pretrained=model_url , image_size=image_size , vit='''base''' )
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config )
    hf_vqa_model.load_state_dict(modified_state_dict )
    question = ['''How many dogs are in this image?''']
    question_input_ids = tokenizer(question , return_tensors='''pt''' ).input_ids
    answer = hf_vqa_model.generate(question_input_ids , image )
    print(tokenizer.decode(answer[0] ) )
    assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
    model_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
    itm_model = blip_itm(pretrained=model_url , image_size=image_size , vit='''base''' )
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config )
    question = ['''A picture of a woman with a dog sitting in a beach''']
    question_input_ids = tokenizer(
        question , return_tensors='''pt''' , padding='''max_length''' , truncation=True , max_length=35 , ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict )
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids , image , use_itm_head=True )
    out = hf_itm_model(question_input_ids , image , use_itm_head=False )
    assert out[0].item() == 0.21_10_68_74_94_27_79_54
    assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 671 | 1 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str , pytorch_dump_folder_path: str ):
    '''simple docstring'''
    bort_4_8_768_1024_hparams = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1e-5,
'''token_type_vocab_size''': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=False , output_all_encodings=False , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = '''openwebtext_ccnews_stories_books_cased'''
    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir() , '''models''' )
    bort_vocab = _load_vocab(vocab_name , None , gluon_cache_dir , cls=Vocab )
    original_bort = nlp.model.BERTModel(
        encoder , len(bort_vocab ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=False , use_decoder=False , )
    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
        '''vocab_size''': len(bort_vocab ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
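    # For example, with layer index 0 the starred rows above expand to concrete names:
    #   `encoder.transformer_cells.0.attention_cell.proj_key.weight`
    #       -> `bert.encoder.layer.0.attention.self.key.weight`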
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers ):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , F"""encoder.transformer_cells.{i}.proj.bias""" )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , F"""encoder.transformer_cells.{i}.proj.weight""" )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.layer_norm.beta""" )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-base''' )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['''input_ids''']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors='''pt''' )
    output_hf = hf_bort_model(**input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1E-3 )
    if success:
        print('''✔️ Both models output the same tensors''' )
    else:
        print('''❌ The models do **NOT** output the same tensors''' )
        print('''Absolute difference is:''' , max_absolute_diff )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
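# Example invocation (editor's sketch; the script file name and both paths are
# hypothetical, only the two flags are defined above):
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch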
| 671 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase ):
    """simple docstring"""

    def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, ):
        '''simple docstring'''
        size = size if size is not None else {'''shortest_edge''': 20}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict( self ):
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""

    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing, '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing, '''size''' ) )
        self.assertTrue(hasattr(image_processing, '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing, '''crop_size''' ) )

    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {'''shortest_edge''': 20} )
        self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
        self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )

    def test_batch_feature( self ):
        '''simple docstring'''
        pass
    def test_call_pil( self ):
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
    def test_call_numpy( self ):
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
    def test_call_pytorch( self ):
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
| 671 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig ):
    """simple docstring"""

    model_type = """beit"""

    def __init__( self, vocab_size=8_192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig ):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("""1.11""" )

    @property
    def inputs( self ):
        '''simple docstring'''
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1E-4
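# Usage sketch (editor's addition, not part of the original module): build the
# default config above and inspect the ONNX export contract it declares.
#   config = BeitConfig()            # vocab_size=8192, hidden_size=768, ...
#   onnx_config = BeitOnnxConfig(config)
#   dict(onnx_config.inputs)         # {'pixel_values': {0: 'batch', 1: 'num_channels', ...}}
#   onnx_config.atol_for_validation  # 1e-4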
| 671 |
import argparse
import os
import re
PATH_TO_TRANSFORMERS = 'src/transformers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def get_indent(line ):
    '''simple docstring'''
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]

def split_code_in_indented_blocks(code , indent_level="" , start_prompt=None , end_prompt=None ):
    '''simple docstring'''
    index = 0
    lines = code.split('''\n''' )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ['''\n'''.join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
                current_block.append(lines[index] )
                blocks.append('''\n'''.join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('''\n'''.join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append('''\n'''.join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append('''\n'''.join(lines[index:] ) )
    return blocks

def ignore_underscore(key ):
    '''simple docstring'''
    def _inner(x ):
        return key(x ).lower().replace('''_''' , '''''' )
    return _inner

def sort_objects(objects , key=None ):
    '''simple docstring'''
    # If no key is provided, we use a noop.
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
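# Example of the ordering implemented above (editor's addition): constants,
# then classes, then functions, with underscores ignored inside each bucket.
#   sort_objects(["zeta_fn", "MY_CONST", "Config", "_apply"])
#   -> ["MY_CONST", "Config", "_apply", "zeta_fn"]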
def sort_objects_in_import(import_statement ):
    '''simple docstring'''
    # This inner function sort imports between [ ].
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return F"""[{imports}]"""
        keys = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(keys )] ) + "]"

    lines = import_statement.split('''\n''' )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '''[''' else 1
        keys = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace , import_statement )
        return import_statement
def sort_imports(file , check_only=True ):
    '''simple docstring'''
    with open(file , encoding='''utf-8''' ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('''\n''' )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '''\n'''.join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i] )
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reorderded_blocks.append(block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(F"""Overwriting {file}.""" )
            with open(file , '''w''' , encoding='''utf-8''' ) as f:
                f.write('''\n'''.join(main_blocks ) )

def sort_imports_in_all_inits(check_only=True ):
    '''simple docstring'''
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , '''__init__.py''' ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , '''__init__.py''' )]
    if len(failures ) > 0:
        raise ValueError(F"""Would overwrite {len(failures )} files, run `make style`.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
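# Example invocations (editor's note; the script path is an assumption):
#   python utils/custom_init_isort.py --check_only   # raise if any __init__.py needs sorting
#   python utils/custom_init_isort.py                # rewrite the offending __init__.py files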
| 671 | 1 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir('fixtures/dummy-config.json')
class AutoConfigTest( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        '''simple docstring'''
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec( self ):
        '''simple docstring'''
        self.assertIsNotNone(transformers.models.auto.__spec__ )
        self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) )

    def test_config_from_model_shortcut( self ):
        '''simple docstring'''
        config = AutoConfig.from_pretrained('''bert-base-uncased''' )
        self.assertIsInstance(config, BertConfig )

    def test_config_model_type_from_local_file( self ):
        '''simple docstring'''
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG )
        self.assertIsInstance(config, RobertaConfig )

    def test_config_model_type_from_model_identifier( self ):
        '''simple docstring'''
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(config, RobertaConfig )

    def test_config_for_model_str( self ):
        '''simple docstring'''
        config = AutoConfig.for_model('''roberta''' )
        self.assertIsInstance(config, RobertaConfig )

    def test_pattern_matching_fallback( self ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, '''fake-roberta''' )
            os.makedirs(folder, exist_ok=True )
            with open(os.path.join(folder, '''config.json''' ), '''w''' ) as f:
                f.write(json.dumps({} ) )
            config = AutoConfig.from_pretrained(folder )
            self.assertEqual(type(config ), RobertaConfig )
    def test_new_config_registration( self ):
        '''simple docstring'''
        try:
            AutoConfig.register('''custom''', CustomConfig )
            # Wrong model type will raise an error
            with self.assertRaises(ValueError ):
                AutoConfig.register('''model''', CustomConfig )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoConfig.register('''bert''', BertConfig )

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir )
                new_config = AutoConfig.from_pretrained(tmp_dir )
                self.assertIsInstance(new_config, CustomConfig )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
    def test_repo_not_found( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, '''bert-base is not a local folder and is not a valid model identifier''' ):
            config = AutoConfig.from_pretrained('''bert-base''' )

    def test_revision_not_found( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='''aaaaaa''' )

    def test_configuration_not_found( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''', ):
            config = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' )
    def test_from_pretrained_dynamic_config( self ):
        '''simple docstring'''
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            config = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            config = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''', trust_remote_code=False )

        config = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''', trust_remote_code=True )
        self.assertEqual(config.__class__.__name__, '''NewModelConfig''' )

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir )
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True )
        self.assertEqual(reloaded_config.__class__.__name__, '''NewModelConfig''' )
    def test_from_pretrained_dynamic_config_conflict( self ):
        '''simple docstring'''

        class NewModelConfigLocal(BertConfig ):
            model_type = """new-model"""

        try:
            AutoConfig.register('''new-model''', NewModelConfigLocal )
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
            self.assertEqual(config.__class__.__name__, '''NewModelConfigLocal''' )
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''', trust_remote_code=False )
            self.assertEqual(config.__class__.__name__, '''NewModelConfigLocal''' )
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''', trust_remote_code=True )
            self.assertEqual(config.__class__.__name__, '''NewModelConfig''' )
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 671 |
from math import sqrt
def is_prime(number ):
    '''simple docstring'''
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and positive"
    status = True
    # 0 and 1 are not prime.
    if number <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(number ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to false and break out of the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool ), "'status' must be from type bool"
    return status
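# Quick sanity checks for is_prime (editor's addition): 0 and 1 are rejected by
# the `number <= 1` branch above, and only divisors up to sqrt(number) are tried.
#   is_prime(2) -> True, is_prime(29) -> True, is_prime(30) -> False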
def sieve_er(n ):
    '''simple docstring'''
    assert isinstance(n , int ) and (n > 2), "'n' must be an int and > 2"
    # begin_list: contains all natural numbers from 2 up to n
    begin_list = list(range(2 , n + 1 ) )
    ans = []  # this list will be returned.
    # actual sieve of Eratosthenes
    for i in range(len(begin_list ) ):
        for j in range(i + 1 , len(begin_list ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans , list ), "'ans' must be from type list"
    return ans

def get_prime_numbers(n ):
    '''simple docstring'''
    assert isinstance(n , int ) and (n > 2), "'n' must be an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to n+1
    # if a number is prime then appends to list 'ans'
    for number in range(2 , n + 1 ):
        if is_prime(number ):
            ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must be from type list"
    return ans

def prime_factorization(number ):
    '''simple docstring'''
    assert isinstance(number , int ) and number >= 0, "'number' must be an int and >= 0"
    ans = []  # this list will be returned by the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number ):
        while quotient != 1:
            if is_prime(factor ) and (quotient % factor == 0):
                ans.append(factor )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must be from type list"
    return ans

def greatest_prime_factor(number ):
    '''simple docstring'''
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = max(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must be from type int"
    return ans

def smallest_prime_factor(number ):
    '''simple docstring'''
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = min(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must be from type int"
    return ans

def is_even(number ):
    '''simple docstring'''
    assert isinstance(number , int ), "'number' must be an int"
    assert isinstance(number % 2 == 0 , bool ), "compare must be from type bool"
    return number % 2 == 0

def is_odd(number ):
    '''simple docstring'''
    assert isinstance(number , int ), "'number' must be an int"
    assert isinstance(number % 2 != 0 , bool ), "compare must be from type bool"
    return number % 2 != 0

def goldbach(number ):
    '''simple docstring'''
    assert (
        isinstance(number , int ) and (number > 2) and is_even(number )
    ), "'number' must be an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number )
    len_pn = len(prime_numbers )
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans , list )
        and (len(ans ) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0] )
        and is_prime(ans[1] )
    ), "'ans' must contain two primes whose sum equals 'number'"
    return ans
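# Worked example for goldbach (editor's addition): for 28 the prime list is
# [2, 3, 5, 7, 11, 13, 17, 19, 23] and the first pair found in index order is
# [5, 23], since 5 + 23 == 28.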
def gcd(number_1 , number_2 ):
    '''simple docstring'''
    assert (
        isinstance(number_1 , int )
        and isinstance(number_2 , int )
        and (number_1 >= 0)
        and (number_2 >= 0)
    ), "'number_1' and 'number_2' must be positive integers."
    rest = 0
    while number_2 != 0:
        rest = number_1 % number_2
        number_1 = number_2
        number_2 = rest
    # precondition
    assert isinstance(number_1 , int ) and (
        number_1 >= 0
    ), "'number_1' must be from type int and positive"
    return number_1

def kg_v(number_1 , number_2 ):
    '''simple docstring'''
    assert (
        isinstance(number_1 , int )
        and isinstance(number_2 , int )
        and (number_1 >= 1)
        and (number_2 >= 1)
    ), "'number_1' and 'number_2' must be positive integers."
    ans = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if number_1 > 1 and number_2 > 1:
        # builds the prime factorization of 'number_1' and 'number_2'
        prime_fac_1 = prime_factorization(number_1 )
        prime_fac_2 = prime_factorization(number_2 )
    elif number_1 == 1 or number_2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number_1 , number_2 )
    count_1 = 0
    count_2 = 0
    done = []  # captured numbers in both 'prime_fac_1' and 'prime_fac_2'
    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count_1 = prime_fac_1.count(n )
                count_2 = prime_fac_2.count(n )
                for _ in range(max(count_1 , count_2 ) ):
                    ans *= n
            else:
                count_1 = prime_fac_1.count(n )
                for _ in range(count_1 ):
                    ans *= n
            done.append(n )
    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count_2 = prime_fac_2.count(n )
            for _ in range(count_2 ):
                ans *= n
            done.append(n )
    # precondition
    assert isinstance(ans , int ) and (
        ans >= 0
    ), "'ans' must be from type int and positive"
    return ans
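# Worked example for kg_v (editor's addition): kg_v(24, 36) -> 72, because the
# factorizations [2, 2, 2, 3] and [2, 2, 3, 3] are merged with each prime taken
# at its maximum multiplicity: 2**3 * 3**2 = 72.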
def get_prime(n ):
    '''simple docstring'''
    assert isinstance(n , int ) and (n >= 0), "'n' must be a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans is not prime then
        # run to the next prime number.
        while not is_prime(ans ):
            ans += 1
    # precondition
    assert isinstance(ans , int ) and is_prime(
        ans ), "'ans' must be a prime number and from type int"
    return ans

def get_primes_between(p_number_1 , p_number_2 ):
    '''simple docstring'''
    assert (
        is_prime(p_number_1 ) and is_prime(p_number_2 ) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number ):
        number += 1
    while number < p_number_2:
        ans.append(number )
        number += 1
        # fetch the next prime number.
        while not is_prime(number ):
            number += 1
    # precondition
    assert (
        isinstance(ans , list )
        and ans[0] != p_number_1
        and ans[len(ans ) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"
    # 'ans' must not contain 'p_number_1' or 'p_number_2'!
    return ans

def get_divisors(n ):
    '''simple docstring'''
    assert isinstance(n , int ) and (n >= 1), "'n' must be an int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1 , n + 1 ):
        if n % divisor == 0:
            ans.append(divisor )
    # precondition
    assert ans[0] == 1 and ans[len(ans ) - 1] == n, "Error in function get_divisors(...)"
    return ans

def is_perfect_number(number ):
    '''simple docstring'''
    assert isinstance(number , int ) and (
        number > 1
    ), "'number' must be an int and > 1"
    divisors = get_divisors(number )
    # precondition
    assert (
        isinstance(divisors , list )
        and (divisors[0] == 1)
        and (divisors[len(divisors ) - 1] == number)
    ), "Error in help-function get_divisors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1] ) == number

def simplify_fraction(numerator , denominator ):
    '''simple docstring'''
    assert (
        isinstance(numerator , int )
        and isinstance(denominator , int )
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator ) , abs(denominator ) )
    # precondition
    assert (
        isinstance(gcd_of_fraction , int )
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)

def factorial(n ):
    '''simple docstring'''
    assert isinstance(n , int ) and (n >= 0), "'n' must be an int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1 , n + 1 ):
        ans *= factor
    return ans

def fib(n ):
    '''simple docstring'''
    assert isinstance(n , int ) and (n >= 0), "'n' must be an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1 ):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
| 671 | 1 |
def solution(n = 100 ):
    '''simple docstring'''
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 , n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
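# Editor's cross-check of the loop above via the standard series identities
# sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6; the function name is ours,
# not part of the original solution.
def solution_closed_form(n = 100 ):
    return (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6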
if __name__ == "__main__":
print(F"{solution() = }")
| 671 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result , args ):
    '''simple docstring'''
    log_outputs = args.log_outputs
    dataset_id = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
    # load metric
    wer = load_metric('''wer''' )
    cer = load_metric('''cer''' )
    # compute metrics
    wer_result = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    cer_result = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    # print & log results
    result_str = F"""WER: {wer_result}\nCER: {cer_result}"""
    print(result_str )
    with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = F"""log_{dataset_id}_predictions.txt"""
        target_file = F"""log_{dataset_id}_targets.txt"""
        with open(pred_file , '''w''' ) as p, open(target_file , '''w''' ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(F"""{i}""" + '''\n''' )
                p.write(batch['''prediction'''] + '''\n''' )
                t.write(F"""{i}""" + '''\n''' )
                t.write(batch['''target'''] + '''\n''' )
            result.map(write_to_file , with_indices=True )
def normalize_text(text ):
    '''simple docstring'''
    chars_to_ignore_regex = '''[,?.!\-\;\:"“%‘”�—’…–]'''  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '''''' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new-line characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['''\n\n''', '''\n''', '''   ''', '''  ''']
    for t in token_sequences_to_ignore:
        text = ''' '''.join(text.split(t ) )
    return text
def main(args ):
    '''simple docstring'''
    # load dataset
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('''audio''' , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch['''prediction'''] = prediction['''text''']
        batch['''target'''] = normalize_text(batch['''sentence'''] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
    args = parser.parse_args()
main(args)
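# Example invocation (editor's sketch; the file name and the model/dataset
# identifiers are hypothetical, the flags are the ones defined above):
#   python eval.py --model_id <wav2vec2-checkpoint> \
#       --dataset mozilla-foundation/common_voice_7_0 --config en --split test \
#       --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs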
| 671 | 1 |
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig ):
    """simple docstring"""

    model_type = """bert-generation"""

    def __init__( self, vocab_size=50_358, hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4_096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
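# Usage sketch (editor's addition): the defaults above describe a 24-layer,
# 1024-hidden encoder, which to our understanding matches the original
# bert-for-seq-generation checkpoints.
#   config = BertGenerationConfig()
#   assert config.num_hidden_layers == 24 and config.hidden_size == 1_024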
| 671 |
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n = N ):
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""

    @slow
    def test_small_integration_test( self ):
        '''simple docstring'''
        model = TFAutoModelForSeq2SeqLM.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''', return_tensors='''tf''' ).input_ids
        labels = tokenizer('''Hi I am''', return_tensors='''tf''' ).input_ids
        loss = model(input_ids, labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 671 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput ):
    """simple docstring"""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin , ConfigMixin ):
    """simple docstring"""

    order = 1
@register_to_config
    def __init__( self, num_train_timesteps = 2_000, snr = 0.15, sigma_min = 0.01, sigma_max = 1348.0, sampling_eps = 1E-5, correct_steps = 1, ):
        '''simple docstring'''
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps )
    def scale_model_input( self, sample, timestep = None ):
        '''simple docstring'''
        return sample

    def set_timesteps( self, num_inference_steps, sampling_eps = None, device = None ):
        '''simple docstring'''
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device )

    def set_sigmas( self, num_inference_steps, sigma_min = None, sigma_max = None, sampling_eps = None ):
        '''simple docstring'''
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps )
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min ), math.log(sigma_max ), num_inference_steps ) )
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )

    def get_adjacent_sigma( self, timesteps, t ):
        '''simple docstring'''
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
    def step_pred( self, model_output, timestep, sample, generator = None, return_dict = True, ):
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device )
        sigma = self.discrete_sigmas[timesteps].to(sample.device )
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep ).to(sample.device )
        drift = torch.zeros_like(sample )
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            diffusion = diffusion.unsqueeze(-1 )
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean )
    def step_correct( self, model_output, sample, generator = None, return_dict = True, ):
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            step_size = step_size.unsqueeze(-1 )
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self, original_samples, noise, timesteps, ):
        '''simple docstring'''
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device )
        sigmas = self.discrete_sigmas.to(original_samples.device )[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples ) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
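# Predictor-corrector sampling sketch (editor's addition; `model` is a
# hypothetical score network returning grad_x log p_t(x), shapes are made up):
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=2_000)
#   scheduler.set_sigmas(num_inference_steps=2_000)
#   sample = torch.randn(1, 3, 256, 256) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           sample = scheduler.step_correct(model(sample, t), sample).prev_sample
#       output = scheduler.step_pred(model(sample, t), t, sample)
#       sample = output.prev_sample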
| 671 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig ):
    """simple docstring"""

    model_type = """convbert"""

    def __init__( self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
@property
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
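# The `inputs` property above declares which ONNX axes are dynamic for the
# exporter. A small illustrative sketch of the mapping it returns for the
# default (non-multiple-choice) task -- only the data structure, not the
# exporter itself:
from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
onnx_inputs = OrderedDict(
    [
        ("input_ids", dynamic_axis),
        ("attention_mask", dynamic_axis),
        ("token_type_ids", dynamic_axis),
    ]
)
# These names and axis indices are what e.g. torch.onnx.export(...,
# dynamic_axes=dict(onnx_inputs)) would consume.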
| 671 |
from collections import defaultdict
from math import gcd
def a ( lowerCamelCase_ = 150_0000 ):
'''simple docstring'''
lowercase__ = defaultdict(lowerCamelCase_ )
lowercase__ = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , lowerCamelCase_ , 2 ):
if gcd(lowerCamelCase_ , lowerCamelCase_ ) > 1:
continue
lowercase__ = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(lowerCamelCase_ , limit + 1 , lowerCamelCase_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"{solution() = }")
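# De-obfuscated, runnable sketch of the routine above (identifier names are
# assumptions): Euclid's formula generates every primitive Pythagorean triple
# from coprime m > n of opposite parity, with perimeter 2m(m + n). Counting
# multiples of each primitive perimeter and keeping those hit exactly once
# yields the number of perimeters admitting exactly one integer right triangle.
from collections import defaultdict
from math import gcd

def singular_triangle_perimeters(limit: int = 1_500_000) -> int:
    frequencies = defaultdict(int)
    m = 2
    while 2 * m * (m + 1) <= limit:  # smallest perimeter for this m uses n = 1
        for n in range((m % 2) + 1, m, 2):  # n < m with m - n odd
            if gcd(m, n) > 1:  # keep primitive triples only
                continue
            primitive = 2 * m * (m + n)
            for perimeter in range(primitive, limit + 1, primitive):
                frequencies[perimeter] += 1
        m += 1
    return sum(1 for count in frequencies.values() if count == 1)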
| 671 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = ViTImageProcessor if is_vision_available() else None
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = (3, 32, 128)
lowercase__ = tempfile.mkdtemp()
# fmt: off
lowercase__ = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
lowercase__ = dict(zip(lowerCamelCase, range(len(lowerCamelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase ) + '''\n''' )
lowercase__ = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
lowercase__ = os.path.join(self.tmpdirname, lowerCamelCase )
with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
json.dump(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : int, **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : str, **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )
lowercase__ = Image.fromarray(np.moveaxis(lowerCamelCase, 0, -1 ) )
return image_input
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
lowercase__ = self.get_image_processor(do_normalize=lowerCamelCase, padding_value=1.0 )
lowercase__ = MgpstrProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=lowerCamelCase, padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(lowerCamelCase, return_tensors='''np''' )
lowercase__ = processor(images=lowerCamelCase, return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = processor(text=lowerCamelCase )
lowercase__ = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.char_decode(lowerCamelCase )
lowercase__ = tokenizer.batch_decode(lowerCamelCase )
lowercase__ = [seq.replace(''' ''', '''''' ) for seq in decoded_tok]
self.assertListEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = None
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = torch.randn(1, 27, 38 )
lowercase__ = torch.randn(1, 27, 50_257 )
lowercase__ = torch.randn(1, 27, 30_522 )
lowercase__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ), ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 671 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
A__ : Dict = logging.get_logger(__name__)
A__ : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A__ : Optional[int] = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
A__ : List[str] = {
'bert-base-uncased': 5_12,
'bert-large-uncased': 5_12,
'bert-base-cased': 5_12,
'bert-large-cased': 5_12,
'bert-base-multilingual-uncased': 5_12,
'bert-base-multilingual-cased': 5_12,
'bert-base-chinese': 5_12,
'bert-base-german-cased': 5_12,
'bert-large-uncased-whole-word-masking': 5_12,
'bert-large-cased-whole-word-masking': 5_12,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_12,
'bert-base-cased-finetuned-mrpc': 5_12,
'bert-base-german-dbmdz-cased': 5_12,
'bert-base-german-dbmdz-uncased': 5_12,
'TurkuNLP/bert-base-finnish-cased-v1': 5_12,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_12,
'wietsedv/bert-base-dutch-cased': 5_12,
}
A__ : Optional[int] = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = BertTokenizer
def __init__( self : Any, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Any=None, lowerCamelCase : Tuple=True, lowerCamelCase : Dict="[UNK]", lowerCamelCase : Any="[SEP]", lowerCamelCase : List[Any]="[PAD]", lowerCamelCase : Optional[Any]="[CLS]", lowerCamelCase : Dict="[MASK]", lowerCamelCase : List[Any]=True, lowerCamelCase : Tuple=None, **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, tokenize_chinese_chars=lowerCamelCase, strip_accents=lowerCamelCase, **lowerCamelCase, )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''', lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''', lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''', lowerCamelCase ) != tokenize_chinese_chars
):
lowercase__ = getattr(lowerCamelCase, normalizer_state.pop('''type''' ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**lowerCamelCase )
lowercase__ = do_lower_case
def lowercase__ ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : Dict=None ):
'''simple docstring'''
lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : List[Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
lowercase__ = self._tokenizer.model.save(lowerCamelCase, name=lowerCamelCase )
return tuple(lowerCamelCase )
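# The __init__ above re-syncs the Rust backend normalizer whenever the Python
# kwargs disagree with its serialized state. The same pattern standalone, for
# one flag (the helper name is an assumption):
import json
from tokenizers import normalizers

def sync_lowercase(backend_tokenizer, do_lower_case: bool) -> None:
    state = json.loads(backend_tokenizer.normalizer.__getstate__())
    if state.get("lowercase") != do_lower_case:
        normalizer_class = getattr(normalizers, state.pop("type"))
        state["lowercase"] = do_lower_case
        backend_tokenizer.normalizer = normalizer_class(**state)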
| 671 | 1 |
from __future__ import annotations
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
if resistor <= 0:
lowercase__ = F"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(lowerCamelCase_ )
first_sum += 1 / float(lowerCamelCase_ )
index += 1
return 1 / first_sum
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase__ = F"""Resistor at index {index} has a negative value!"""
raise ValueError(lowerCamelCase_ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
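# Worked example for the two helpers above (values assumed): 4 Ω and 12 Ω
# give 1 / (1/4 + 1/12) = 3 Ω in parallel and 16 Ω in series.
def parallel(resistors: list) -> float:
    return 1 / sum(1 / r for r in resistors)

assert abs(parallel([4.0, 12.0]) - 3.0) < 1e-9
assert sum([4.0, 12.0]) == 16.0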
| 671 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A__ : Any = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
A__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
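# The _LazyModule indirection above defers the torch-backed imports until first
# attribute access, so importing the package stays cheap. A generic minimal
# version of the pattern (a sketch, not the library's implementation):
import importlib

class LazyAttr:
    def __init__(self, module_name: str):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, name: str):
        if self._module is None:  # import happens only on first access
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, name)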
| 671 | 1 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
A__ : Optional[Any] = False
A__ : Optional[int] = logging.get_logger(__name__)
A__ : Tuple = 'ybelkada/fonts'
def a ( ):
'''simple docstring'''
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
requires_backends(lowerCamelCase_ , ['''torch'''] )
_check_torch_version()
lowercase__ = image_tensor.unsqueeze(0 )
lowercase__ = torch.nn.functional.unfold(lowerCamelCase_ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
lowercase__ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , lowerCamelCase_ , lowerCamelCase_ , -1 )
lowercase__ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
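# Standalone sanity sketch for the unfold-based patch extraction above (shapes
# assumed): each 16x16 patch of a (3, 32, 128) image flattens to a vector of
# length 3*16*16 = 768, laid out on a 2x8 patch grid.
import torch

def extract_patches(image: torch.Tensor, ph: int, pw: int) -> torch.Tensor:
    c, h, w = image.shape
    patches = torch.nn.functional.unfold(image.unsqueeze(0), (ph, pw), stride=(ph, pw))
    patches = patches.reshape(1, c, ph, pw, -1).permute(0, 4, 2, 3, 1)
    return patches.reshape(h // ph, w // pw, c * ph * pw).unsqueeze(0)

assert extract_patches(torch.randn(3, 32, 128), 16, 16).shape == (1, 2, 8, 768)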
def a ( lowerCamelCase_ , lowerCamelCase_ = 36 , lowerCamelCase_ = "black" , lowerCamelCase_ = "white" , lowerCamelCase_ = 5 , lowerCamelCase_ = 5 , lowerCamelCase_ = 5 , lowerCamelCase_ = 5 , lowerCamelCase_ = None , lowerCamelCase_ = None , ):
'''simple docstring'''
requires_backends(lowerCamelCase_ , '''vision''' )
# Add new lines so that each line is no more than 80 characters.
lowercase__ = textwrap.TextWrapper(width=80 )
lowercase__ = wrapper.wrap(text=lowerCamelCase_ )
lowercase__ = '''\n'''.join(lowerCamelCase_ )
if font_bytes is not None and font_path is None:
lowercase__ = io.BytesIO(lowerCamelCase_ )
elif font_path is not None:
lowercase__ = font_path
else:
lowercase__ = hf_hub_download(lowerCamelCase_ , '''Arial.TTF''' )
lowercase__ = ImageFont.truetype(lowerCamelCase_ , encoding='''UTF-8''' , size=lowerCamelCase_ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
lowercase__ = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , lowerCamelCase_ ) )
lowercase__ , lowercase__ , lowercase__ , lowercase__ = temp_draw.textbbox((0, 0) , lowerCamelCase_ , lowerCamelCase_ )
# Create the actual image with a bit of padding around the text.
lowercase__ = text_width + left_padding + right_padding
lowercase__ = text_height + top_padding + bottom_padding
lowercase__ = Image.new('''RGB''' , (image_width, image_height) , lowerCamelCase_ )
lowercase__ = ImageDraw.Draw(lowerCamelCase_ )
draw.text(xy=(left_padding, top_padding) , text=lowerCamelCase_ , fill=lowerCamelCase_ , font=lowerCamelCase_ )
return image
def a ( lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(lowerCamelCase_ , '''vision''' )
# Convert to PIL image if necessary
lowercase__ = to_pil_image(lowerCamelCase_ )
lowercase__ = render_text(lowerCamelCase_ , **lowerCamelCase_ )
lowercase__ = max(header_image.width , image.width )
lowercase__ = int(image.height * (new_width / image.width) )
lowercase__ = int(header_image.height * (new_width / header_image.width) )
lowercase__ = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
lowercase__ = to_numpy_array(lowerCamelCase_ )
if infer_channel_dimension_format(lowerCamelCase_ ) == ChannelDimension.LAST:
lowercase__ = to_channel_dimension_format(lowerCamelCase_ , ChannelDimension.LAST )
return new_image
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = ["""flattened_patches"""]
def __init__( self : str, lowerCamelCase : bool = True, lowerCamelCase : bool = True, lowerCamelCase : Dict[str, int] = None, lowerCamelCase : int = 2_048, lowerCamelCase : bool = False, **lowerCamelCase : Optional[Any], ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
lowercase__ = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
lowercase__ = do_normalize
lowercase__ = do_convert_rgb
lowercase__ = max_patches
lowercase__ = is_vqa
def lowercase__ ( self : Optional[int], lowerCamelCase : np.ndarray, lowerCamelCase : int, lowerCamelCase : dict, **lowerCamelCase : Dict ):
'''simple docstring'''
requires_backends(self.extract_flattened_patches, '''torch''' )
_check_torch_version()
# convert to torch
lowercase__ = to_channel_dimension_format(lowerCamelCase, ChannelDimension.FIRST )
lowercase__ = torch.from_numpy(lowerCamelCase )
lowercase__ , lowercase__ = patch_size['''height'''], patch_size['''width''']
lowercase__ , lowercase__ = get_image_size(lowerCamelCase )
        # maximize scale s.t. the total number of patches does not exceed max_patches
lowercase__ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
lowercase__ = max(min(math.floor(scale * image_height / patch_height ), lowerCamelCase ), 1 )
lowercase__ = max(min(math.floor(scale * image_width / patch_width ), lowerCamelCase ), 1 )
lowercase__ = max(num_feasible_rows * patch_height, 1 )
lowercase__ = max(num_feasible_cols * patch_width, 1 )
lowercase__ = torch.nn.functional.interpolate(
image.unsqueeze(0 ), size=(resized_height, resized_width), mode='''bilinear''', align_corners=lowerCamelCase, antialias=lowerCamelCase, ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
lowercase__ = torch_extract_patches(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = patches.shape
lowercase__ = patches_shape[1]
lowercase__ = patches_shape[2]
lowercase__ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
lowercase__ = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
lowercase__ = torch.arange(lowerCamelCase ).reshape([rows, 1] ).repeat(1, lowerCamelCase ).reshape([rows * columns, 1] )
lowercase__ = torch.arange(lowerCamelCase ).reshape([1, columns] ).repeat(lowerCamelCase, 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
lowercase__ = row_ids.to(torch.floataa )
lowercase__ = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
lowercase__ = torch.cat([row_ids, col_ids, patches], -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
lowercase__ = torch.nn.functional.pad(lowerCamelCase, [0, 0, 0, max_patches - (rows * columns)] ).float()
lowercase__ = to_numpy_array(lowerCamelCase )
return result
def lowercase__ ( self : List[str], lowerCamelCase : np.ndarray, lowerCamelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCamelCase : int ):
'''simple docstring'''
if image.dtype == np.uinta:
lowercase__ = image.astype(np.floataa )
# take mean across the whole `image`
lowercase__ = np.mean(lowerCamelCase )
lowercase__ = np.std(lowerCamelCase )
lowercase__ = max(lowerCamelCase, 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(lowerCamelCase, mean=lowerCamelCase, std=lowerCamelCase, **lowerCamelCase )
def lowercase__ ( self : Dict, lowerCamelCase : ImageInput, lowerCamelCase : Optional[str] = None, lowerCamelCase : bool = None, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Dict[str, int]] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, lowerCamelCase : ChannelDimension = ChannelDimension.FIRST, **lowerCamelCase : Union[str, Any], ):
'''simple docstring'''
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase__ = patch_size if patch_size is not None else self.patch_size
lowercase__ = max_patches if max_patches is not None else self.max_patches
lowercase__ = self.is_vqa
if kwargs.get('''data_format''', lowerCamelCase ) is not None:
            raise ValueError('''data_format is not an accepted input as the outputs are always flattened patches.''' )
lowercase__ = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase__ = [convert_to_rgb(lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
lowercase__ = [to_numpy_array(lowerCamelCase ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
lowercase__ = kwargs.pop('''font_bytes''', lowerCamelCase )
lowercase__ = kwargs.pop('''font_path''', lowerCamelCase )
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = [header_text] * len(lowerCamelCase )
lowercase__ = [
render_header(lowerCamelCase, header_text[i], font_bytes=lowerCamelCase, font_path=lowerCamelCase )
for i, image in enumerate(lowerCamelCase )
]
if do_normalize:
lowercase__ = [self.normalize(image=lowerCamelCase ) for image in images]
# convert to torch tensor and permute
lowercase__ = [
self.extract_flattened_patches(image=lowerCamelCase, max_patches=lowerCamelCase, patch_size=lowerCamelCase )
for image in images
]
# create attention mask in numpy
lowercase__ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
lowercase__ = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks}, tensor_type=lowerCamelCase )
return encoded_outputs
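# End-to-end sketch of the processor above (the checkpoint id and use of a
# synthetic blank page are assumptions):
import numpy as np
from PIL import Image
from transformers import Pix2StructImageProcessor

proc = Pix2StructImageProcessor.from_pretrained("google/pix2struct-base")
page = Image.fromarray(np.full((480, 640, 3), 255, dtype=np.uint8))  # blank page
batch = proc(images=page, return_tensors="pt")
# batch["flattened_patches"] has shape (1, max_patches, 2 + 16*16*3): the first
# two entries of each row are the 1-based (row, col) patch coordinates.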
| 671 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
A__ : Dict = 50_00_00
A__ , A__ : str = os.path.split(__file__)
A__ : Optional[Any] = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def a ( lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dataset.map(**lowerCamelCase_ )
@get_duration
def a ( lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dataset.filter(**lowerCamelCase_ )
def a ( ):
'''simple docstring'''
lowercase__ = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
lowercase__ = generate_example_dataset(
os.path.join(lowerCamelCase_ , '''dataset.arrow''' ) , lowerCamelCase_ , num_examples=lowerCamelCase_ )
lowercase__ = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowerCamelCase_ )
def tokenize(lowerCamelCase_ ):
return tokenizer(examples['''text'''] )
lowercase__ = map(lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , batched=lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''numpy''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''pandas''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , function=lowerCamelCase_ , batched=lowerCamelCase_ )
lowercase__ = filter(lowerCamelCase_ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowerCamelCase_ , '''wb''' ) as f:
f.write(json.dumps(lowerCamelCase_ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
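# `get_duration` comes from a local utils module that is not shown here; a
# minimal stand-in consistent with how it is used above (an assumption, not
# the benchmark's actual helper):
import functools
import time

def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start  # seconds elapsed
    return wrapper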
| 671 | 1 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
A__ : List[Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Any, *lowerCamelCase : Union[str, Any], **lowerCamelCase : Dict ):
'''simple docstring'''
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''', lowerCamelCase, )
super().__init__(*lowerCamelCase, **lowerCamelCase )
| 671 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : str = "", lowerCamelCase : bool = False ):
'''simple docstring'''
# Mapping from the first character of the prefix of the node
lowercase__ = {}
# A node will be a leaf if the tree contains its word
lowercase__ = is_leaf
lowercase__ = prefix
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = 0
for q, w in zip(self.prefix, lowerCamelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def lowercase__ ( self : Optional[int], lowerCamelCase : list[str] ):
'''simple docstring'''
for word in words:
self.insert(lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : str ):
'''simple docstring'''
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
lowercase__ = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowercase__ = RadixNode(prefix=lowerCamelCase, is_leaf=lowerCamelCase )
else:
lowercase__ = self.nodes[word[0]]
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCamelCase )
            # Case 4: The word is greater than or equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowercase__ = remaining_prefix
lowercase__ = self.nodes[matching_string[0]]
lowercase__ = RadixNode(lowerCamelCase, lowerCamelCase )
lowercase__ = aux_node
if remaining_word == "":
lowercase__ = True
else:
self.nodes[matching_string[0]].insert(lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.nodes.get(word[0], lowerCamelCase )
if not incoming_node:
return False
else:
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(lowerCamelCase )
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.nodes.get(word[0], lowerCamelCase )
if not incoming_node:
return False
else:
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCamelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowercase__ = list(self.nodes.values() )[0]
lowercase__ = merging_node.is_leaf
self.prefix += merging_node.prefix
lowercase__ = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowercase__ = False
# If there is 1 edge, we merge it with its child
else:
lowercase__ = list(incoming_node.nodes.values() )[0]
lowercase__ = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowercase__ = merging_node.nodes
return True
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int = 0 ):
'''simple docstring'''
if self.prefix != "":
print('''-''' * height, self.prefix, ''' (leaf)''' if self.is_leaf else '''''' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def a ( ):
'''simple docstring'''
lowercase__ = '''banana bananas bandana band apple all beast'''.split()
lowercase__ = RadixNode()
root.insert_many(lowerCamelCase_ )
assert all(root.find(lowerCamelCase_ ) for word in words )
assert not root.find('''bandanas''' )
assert not root.find('''apps''' )
root.delete('''all''' )
assert not root.find('''all''' )
root.delete('''banana''' )
assert not root.find('''banana''' )
assert root.find('''bananas''' )
return True
def a ( ):
'''simple docstring'''
assert test_trie()
def a ( ):
'''simple docstring'''
lowercase__ = RadixNode()
lowercase__ = '''banana bananas bandanas bandana band apple all beast'''.split()
root.insert_many(lowerCamelCase_ )
print('''Words:''' , lowerCamelCase_ )
print('''Tree:''' )
root.print_tree()
if __name__ == "__main__":
main()
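# Quick interactive sketch of the trie API the tests above exercise (class and
# method names follow the call sites, which the renamed definitions in this
# file no longer match, so this is illustrative only):
# root = RadixNode()
# root.insert_many(["banana", "band", "bandana"])
# assert root.find("band") and not root.find("ban")
# root.delete("band")
# assert not root.find("band") and root.find("bandana")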
| 671 | 1 |
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar), the speed of light c, and the
# value of pi used by the function below
A__ : Union[str, Any] = 1.054_571_817e-34 # unit of ℏ : J * s
A__ : Dict = 3e8 # unit of c : m * s^-1
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if force < 0:
raise ValueError('''Magnitude of force can not be negative''' )
if distance < 0:
raise ValueError('''Distance can not be negative''' )
if area < 0:
raise ValueError('''Area can not be negative''' )
if force == 0:
lowercase__ = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
lowercase__ = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
lowercase__ = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError('''One and only one argument must be 0''' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
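# Worked example (assumed values): two 1 cm^2 plates held 1 µm apart.
from math import pi

hbar, c = 1.054_571_817e-34, 3e8
area, distance = 1e-4, 1e-6  # m^2, m
force = (pi**2 * hbar * c * area) / (240 * distance**4)
print(f"{force:.3e} N")  # ~1.3e-7 N, the attractive Casimir force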
| 671 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = ViTImageProcessor if is_vision_available() else None
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = (3, 32, 128)
lowercase__ = tempfile.mkdtemp()
# fmt: off
lowercase__ = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
lowercase__ = dict(zip(lowerCamelCase, range(len(lowerCamelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase ) + '''\n''' )
lowercase__ = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
lowercase__ = os.path.join(self.tmpdirname, lowerCamelCase )
with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
json.dump(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : int, **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : str, **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )
lowercase__ = Image.fromarray(np.moveaxis(lowerCamelCase, 0, -1 ) )
return image_input
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
lowercase__ = self.get_image_processor(do_normalize=lowerCamelCase, padding_value=1.0 )
lowercase__ = MgpstrProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=lowerCamelCase, padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(lowerCamelCase, return_tensors='''np''' )
lowercase__ = processor(images=lowerCamelCase, return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = processor(text=lowerCamelCase )
lowercase__ = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.char_decode(lowerCamelCase )
lowercase__ = tokenizer.batch_decode(lowerCamelCase )
lowercase__ = [seq.replace(''' ''', '''''' ) for seq in decoded_tok]
self.assertListEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = None
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = torch.randn(1, 27, 38 )
lowercase__ = torch.randn(1, 27, 50_257 )
lowercase__ = torch.randn(1, 27, 30_522 )
lowercase__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ), ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 671 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 671 |
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
lowercase__ = _modexpt(lowerCamelCase_ , exponent // 2 , lowerCamelCase_ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowerCamelCase_ , exponent - 1 , lowerCamelCase_ )) % modulo_value
def a ( lowerCamelCase_ = 1777 , lowerCamelCase_ = 1855 , lowerCamelCase_ = 8 ):
'''simple docstring'''
lowercase__ = base
for _ in range(1 , lowerCamelCase_ ):
lowercase__ = _modexpt(lowerCamelCase_ , lowerCamelCase_ , 10**digits )
return result
if __name__ == "__main__":
print(F"{solution() = }")
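# The helper above is square-and-multiply modular exponentiation, applied
# `base_repetitions - 1` times to evaluate the power tower 1777^(1777^(...))
# modulo 10**digits. An iterative equivalent, checked against Python's
# built-in three-argument pow (names assumed):
def modexp(base: int, exponent: int, modulus: int) -> int:
    result, base = 1, base % modulus
    while exponent:
        if exponent & 1:
            result = result * base % modulus
        base = base * base % modulus
        exponent >>= 1
    return result

assert modexp(1777, 1855, 10**8) == pow(1777, 1855, 10**8)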
| 671 | 1 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
A__ : Optional[int] = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Dict, lowerCamelCase : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
'''simple docstring'''
super().__init__()
lowercase__ = nn.ModuleList(lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Union[torch.Tensor, float, int], lowerCamelCase : torch.Tensor, lowerCamelCase : List[torch.tensor], lowerCamelCase : List[float], lowerCamelCase : Optional[torch.Tensor] = None, lowerCamelCase : Optional[torch.Tensor] = None, lowerCamelCase : Optional[torch.Tensor] = None, lowerCamelCase : Optional[Dict[str, Any]] = None, lowerCamelCase : bool = False, lowerCamelCase : bool = True, ):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase, lowerCamelCase, self.nets ) ):
lowercase__ , lowercase__ = controlnet(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, )
# merge samples
if i == 0:
lowercase__ , lowercase__ = down_samples, mid_sample
else:
lowercase__ = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCamelCase, lowerCamelCase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def lowercase__ ( self : Any, lowerCamelCase : Union[str, os.PathLike], lowerCamelCase : bool = True, lowerCamelCase : Callable = None, lowerCamelCase : bool = False, lowerCamelCase : Optional[str] = None, ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCamelCase, is_main_process=lowerCamelCase, save_function=lowerCamelCase, safe_serialization=lowerCamelCase, variant=lowerCamelCase, )
idx += 1
lowercase__ = model_path_to_save + F"""_{idx}"""
@classmethod
def lowercase__ ( cls : List[str], lowerCamelCase : Optional[Union[str, os.PathLike]], **lowerCamelCase : Any ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
lowercase__ = pretrained_model_path
while os.path.isdir(lowerCamelCase ):
lowercase__ = ControlNetModel.from_pretrained(lowerCamelCase, **lowerCamelCase )
controlnets.append(lowerCamelCase )
idx += 1
lowercase__ = pretrained_model_path + F"""_{idx}"""
logger.info(F"""{len(lowerCamelCase )} controlnets loaded from {pretrained_model_path}.""" )
if len(lowerCamelCase ) == 0:
raise ValueError(
F"""No ControlNets found under {os.path.dirname(lowerCamelCase )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(lowerCamelCase )
| 671 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
A__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : WhisperForConditionalGeneration, lowerCamelCase : WhisperProcessor, lowerCamelCase : AutoencoderKL, lowerCamelCase : CLIPTextModel, lowerCamelCase : CLIPTokenizer, lowerCamelCase : UNetaDConditionModel, lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], lowerCamelCase : StableDiffusionSafetyChecker, lowerCamelCase : CLIPImageProcessor, ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=lowerCamelCase, speech_processor=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, unet=lowerCamelCase, scheduler=lowerCamelCase, feature_extractor=lowerCamelCase, )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
lowercase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase )
@torch.no_grad()
def __call__( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[Any]=16_000, lowerCamelCase : int = 512, lowerCamelCase : int = 512, lowerCamelCase : int = 50, lowerCamelCase : float = 7.5, lowerCamelCase : Optional[Union[str, List[str]]] = None, lowerCamelCase : Optional[int] = 1, lowerCamelCase : float = 0.0, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : Optional[torch.FloatTensor] = None, lowerCamelCase : Optional[str] = "pil", lowerCamelCase : bool = True, lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, lowerCamelCase : int = 1, **lowerCamelCase : Optional[Any], ):
'''simple docstring'''
lowercase__ = self.speech_processor.feature_extractor(
lowerCamelCase, return_tensors='''pt''', sampling_rate=lowerCamelCase ).input_features.to(self.device )
lowercase__ = self.speech_model.generate(lowerCamelCase, max_length=480_000 )
lowercase__ = self.speech_processor.tokenizer.batch_decode(lowerCamelCase, skip_special_tokens=lowerCamelCase, normalize=lowerCamelCase )[
0
]
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = 1
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = len(lowerCamelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase, lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(lowerCamelCase )}.""" )
# get prompt text embeddings
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=self.tokenizer.model_max_length, return_tensors='''pt''', )
lowercase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
lowercase__ = text_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt, lowerCamelCase, -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = [''''''] * batch_size
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !="""
F""" {type(lowerCamelCase )}.""" )
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
lowercase__ = negative_prompt
lowercase__ = text_input_ids.shape[-1]
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=lowerCamelCase, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = uncond_embeddings.shape[1]
lowercase__ = uncond_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt, lowerCamelCase, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='''cpu''', dtype=lowerCamelCase ).to(
self.device )
else:
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase )
# predict the noise residual
lowercase__ = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = 1 / 0.18215 * latents
lowercase__ = self.vae.decode(lowerCamelCase ).sample
lowercase__ = (image / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase )
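# --- Illustrative companion sketch (added for clarity; not in the original file) ---
# The guidance step above is plain tensor arithmetic: the UNet runs on a
# doubled batch, the output splits into unconditional and text halves, and the
# prediction is pushed away from the unconditional branch. A minimal,
# self-contained restatement (random tensors stand in for real UNet output):
import torch

noise_pred = torch.randn(2, 4, 64, 64)  # [uncond, text] stacked on the batch axis
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guidance_scale = 7.5  # w in Imagen eq. (2); 1.0 disables guidance
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == noise_pred_uncond.shape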
| 671 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A__ : Optional[int] = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[Any] = ['PerceiverFeatureExtractor']
A__ : Optional[int] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
A__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : str, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = [[] for _ in range(lowerCamelCase )]
lowercase__ = size
def __getitem__( self : Optional[Any], lowerCamelCase : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def lowercase__ ( self : str ):
'''simple docstring'''
return self._size
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(lowerCamelCase, lowerCamelCase ) )
def lowercase__ ( self : Optional[int], lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = deque([start_vertex] )
lowercase__ = [None] * self.size
lowercase__ = 0
while queue:
lowercase__ = queue.popleft()
lowercase__ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowercase__ = current_distance + edge.weight
lowercase__ = distances[edge.destination_vertex]
if (
isinstance(lowerCamelCase, lowerCamelCase )
and new_distance >= dest_vertex_distance
):
continue
lowercase__ = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
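# --- Illustrative companion sketch (added for clarity; not in the original file) ---
# The method above is 0-1 BFS: with edge weights restricted to {0, 1}, a deque
# replaces Dijkstra's priority queue, and zero-weight edges go to the front so
# the deque stays sorted by distance. A de-obfuscated restatement:
from collections import deque

def zero_one_bfs(adjacency: list[list[tuple[int, int]]], start: int) -> list[float]:
    """adjacency[u] lists (neighbor, weight) pairs with weight in {0, 1}."""
    distances = [float("inf")] * len(adjacency)
    distances[start] = 0
    queue = deque([start])
    while queue:
        u = queue.popleft()
        for v, w in adjacency[u]:
            if distances[u] + w < distances[v]:
                distances[v] = distances[u] + w
                if w == 0:
                    queue.appendleft(v)  # zero-weight: same distance layer
                else:
                    queue.append(v)
    return distances

# 0 -0-> 1, 0 -1-> 2, 1 -1-> 2: both routes to vertex 2 cost 1.
assert zero_one_bfs([[(1, 0), (2, 1)], [(2, 1)], []], 0) == [0, 0, 1]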
| 671 | 1 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def a ( ):
'''simple docstring'''
lowercase__ = argparse.ArgumentParser()
parser.add_argument('''--model_ckpt''' , type=lowerCamelCase_ , default='''microsoft/unixcoder-base-nine''' )
parser.add_argument('''--num_epochs''' , type=lowerCamelCase_ , default=5 )
parser.add_argument('''--batch_size''' , type=lowerCamelCase_ , default=6 )
parser.add_argument('''--gradient_accumulation_steps''' , type=lowerCamelCase_ , default=1 )
parser.add_argument('''--freeze''' , type=lowerCamelCase_ , default=lowerCamelCase_ )
parser.add_argument('''--learning_rate''' , type=lowerCamelCase_ , default=5e-4 )
parser.add_argument('''--seed''' , type=lowerCamelCase_ , default=0 )
parser.add_argument('''--lr_scheduler_type''' , type=lowerCamelCase_ , default='''cosine''' )
parser.add_argument('''--num_warmup_steps''' , type=lowerCamelCase_ , default=10 )
parser.add_argument('''--weight_decay''' , type=lowerCamelCase_ , default=0.01 )
parser.add_argument('''--output_dir''' , type=lowerCamelCase_ , default='''./results''' )
return parser.parse_args()
A__ : List[str] = load('accuracy')
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ , lowercase__ = eval_pred
lowercase__ = np.argmax(lowerCamelCase_ , axis=1 )
return metric.compute(predictions=lowerCamelCase_ , references=lowerCamelCase_ )
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[Any], lowerCamelCase : str ):
'''simple docstring'''
super().__init__()
lowercase__ = trainer
def lowercase__ ( self : List[Any], lowerCamelCase : Any, lowerCamelCase : Optional[int], lowerCamelCase : Optional[Any], **lowerCamelCase : List[str] ):
'''simple docstring'''
if control.should_evaluate:
lowercase__ = deepcopy(lowerCamelCase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix='''train''' )
return control_copy
def a ( ):
'''simple docstring'''
lowercase__ = get_args()
set_seed(args.seed )
lowercase__ = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
lowercase__ = dataset.train_test_split(test_size=0.2 )
lowercase__ = train_test['''test'''].train_test_split(test_size=0.5 )
lowercase__ = DatasetDict(
{
'''train''': train_test['''train'''],
'''test''': test_validation['''train'''],
'''valid''': test_validation['''test'''],
} )
print('''Loading tokenizer and model''' )
lowercase__ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowercase__ = tokenizer.eos_token
lowercase__ = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
lowercase__ = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
lowercase__ = False
lowercase__ = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
def tokenize(lowerCamelCase_ ):
lowercase__ = tokenizer(example['''src'''] , truncation=lowerCamelCase_ , max_length=1024 )
lowercase__ = labels.str2int(example['''complexity'''] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
lowercase__ = train_test_validation.map(
lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=train_test_validation['''train'''].column_names , )
lowercase__ = DataCollatorWithPadding(tokenizer=lowerCamelCase_ )
lowercase__ = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
lowercase__ = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=lowerCamelCase_ , data_collator=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , )
print('''Training...''' )
trainer.add_callback(CustomCallback(lowerCamelCase_ ) )
trainer.train()
if __name__ == "__main__":
main()
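# --- Illustrative usage (added for clarity; not part of the original script) ---
# Everything is driven by the argparse flags defined in get_args(); the file
# name below is a placeholder, not the script's real name:
#   python train_code_complexity.py --model_ckpt microsoft/unixcoder-base-nine \
#       --num_epochs 5 --batch_size 6 --freeze True --output_dir ./results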
| 671 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
# the constructor receives a comma-separated string, so split it into a list
lowercase__ = arr.split(''',''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = [int(self.array[0] )] * len(self.array )
lowercase__ = [int(self.array[0] )] * len(self.array )
for i in range(1, len(self.array ) ):
lowercase__ = max(
int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
lowercase__ = max(sum_value[i], rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
A__ : Dict = input('please input some numbers:')
A__ : Union[str, Any] = SubArray(whole_array)
A__ : int = array.solve_sub_array()
print('the result is:', re)
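# --- Illustrative companion sketch (added for clarity; not in the original file) ---
# solve_sub_array above is Kadane's algorithm: sum_value[i] holds the best sum
# of a subarray ending at i, rear[i] the best sum seen anywhere so far. The
# same idea in its usual compact form:
def max_subarray_sum(nums: list[int]) -> int:
    best = current = nums[0]
    for x in nums[1:]:
        current = max(current + x, x)  # extend the running subarray or restart at x
        best = max(best, current)
    return best

assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # subarray [4, -1, 2, 1]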
| 671 | 1 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = None
if token is not None:
lowercase__ = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"""Bearer {token}"""}
lowercase__ = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
lowercase__ = requests.get(lowerCamelCase_ , headers=lowerCamelCase_ ).json()
lowercase__ = {}
try:
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
lowercase__ = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(lowerCamelCase_ ):
lowercase__ = requests.get(url + F"""&page={i + 2}""" , headers=lowerCamelCase_ ).json()
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = None
if token is not None:
lowercase__ = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"""Bearer {token}"""}
lowercase__ = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
lowercase__ = requests.get(lowerCamelCase_ , headers=lowerCamelCase_ ).json()
lowercase__ = {}
try:
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
lowercase__ = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(lowerCamelCase_ ):
lowercase__ = requests.get(url + F"""&page={i + 2}""" , headers=lowerCamelCase_ ).json()
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = None
if token is not None:
lowercase__ = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"""Bearer {token}"""}
lowercase__ = requests.get(lowerCamelCase_ , headers=lowerCamelCase_ , allow_redirects=lowerCamelCase_ )
lowercase__ = result.headers['''Location''']
lowercase__ = requests.get(lowerCamelCase_ , allow_redirects=lowerCamelCase_ )
lowercase__ = os.path.join(lowerCamelCase_ , F"""{artifact_name}.zip""" )
with open(lowerCamelCase_ , '''wb''' ) as fp:
fp.write(response.content )
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = []
lowercase__ = []
lowercase__ = None
with zipfile.ZipFile(lowerCamelCase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCamelCase_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(lowerCamelCase_ ) as f:
for line in f:
lowercase__ = line.decode('''UTF-8''' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowercase__ = line[: line.index(''': ''' )]
lowercase__ = line[line.index(''': ''' ) + len(''': ''' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ):
# `test` is the test method that failed
lowercase__ = line[len('''FAILED ''' ) :]
failed_tests.append(lowerCamelCase_ )
elif filename == "job_name.txt":
lowercase__ = line
if len(lowerCamelCase_ ) != len(lowerCamelCase_ ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(lowerCamelCase_ )} for `errors` """
F"""and {len(lowerCamelCase_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
''' problem.''' )
lowercase__ = None
if job_name and job_links:
lowercase__ = job_links.get(lowerCamelCase_ , lowerCamelCase_ )
# A list with elements of the form (line of error, error, failed test)
lowercase__ = [x + [y] + [job_link] for x, y in zip(lowerCamelCase_ , lowerCamelCase_ )]
return result
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = []
lowercase__ = [os.path.join(lowerCamelCase_ , lowerCamelCase_ ) for p in os.listdir(lowerCamelCase_ ) if p.endswith('''.zip''' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(lowerCamelCase_ , job_links=lowerCamelCase_ ) )
return errors
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = Counter()
counter.update([x[1] for x in logs] )
lowercase__ = counter.most_common()
lowercase__ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowercase__ = {'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]}
lowercase__ = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=lowerCamelCase_ ) )
return r
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = test.split('''::''' )[0]
if test.startswith('''tests/models/''' ):
lowercase__ = test.split('''/''' )[2]
else:
lowercase__ = None
return test
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowercase__ = [x for x in logs if x[2] is not None]
lowercase__ = {x[2] for x in logs}
lowercase__ = {}
for test in tests:
lowercase__ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowercase__ = counter.most_common()
lowercase__ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowercase__ = sum(error_counts.values() )
if n_errors > 0:
lowercase__ = {'''count''': n_errors, '''errors''': error_counts}
lowercase__ = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=lowerCamelCase_ ) )
return r
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''| no. | error | status |'''
lowercase__ = '''|-:|:-|:-|'''
lowercase__ = [header, sep]
for error in reduced_by_error:
lowercase__ = reduced_by_error[error]['''count''']
lowercase__ = F"""| {count} | {error[:100]} | |"""
lines.append(lowerCamelCase_ )
return "\n".join(lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''| model | no. of errors | major error | count |'''
lowercase__ = '''|-:|-:|-:|-:|'''
lowercase__ = [header, sep]
for model in reduced_by_model:
lowercase__ = reduced_by_model[model]['''count''']
lowercase__ , lowercase__ = list(reduced_by_model[model]['''errors'''].items() )[0]
lowercase__ = F"""| {model} | {count} | {error[:60]} | {_count} |"""
lines.append(lowerCamelCase_ )
return "\n".join(lowerCamelCase_ )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
A__ : List[Any] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A__ : Any = get_job_links(args.workflow_run_id, token=args.token)
A__ : Dict = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A__ : int = k.find(' / ')
A__ : Optional[Any] = k[index + len(' / ') :]
A__ : Any = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A__ : Optional[int] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A__ : int = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A__ : str = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A__ : Optional[int] = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A__ : Dict = reduce_by_error(errors)
A__ : List[str] = reduce_by_model(errors)
A__ : Any = make_github_table(reduced_by_error)
A__ : Dict = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
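# --- Illustrative sketch (added for clarity; not part of the original script) ---
# reduce_by_error boils down to a Counter group-by over the error column of
# each (error_line, error, failed_test) row. In miniature:
from collections import Counter

sample_logs = [("l1", "OOM", "test_a"), ("l2", "OOM", "test_b"), ("l3", "Timeout", "test_c")]
sample_counts = Counter(row[1] for row in sample_logs).most_common()
assert sample_counts == [("OOM", 2), ("Timeout", 1)]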
| 671 |
from itertools import count
def a ( lowerCamelCase_ = 50 ):
'''simple docstring'''
lowercase__ = [1] * min_block_length
for n in count(lowerCamelCase_ ):
fill_count_functions.append(1 )
for block_length in range(lowerCamelCase_ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 100_0000:
break
return n
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
def a ( lowerCamelCase_ ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
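# --- Illustrative note (added for clarity; not in the original file) ---
# str.split() with no separator collapses runs of whitespace, so leading,
# trailing, and repeated spaces disappear before the words are reversed:
assert " ".join("  hello   world ".split()[::-1]) == "world hello"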
| 671 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A__ : Tuple = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[int], lowerCamelCase : int=64, lowerCamelCase : Union[str, Any]=48_000, lowerCamelCase : str=480, lowerCamelCase : Tuple=10, lowerCamelCase : List[Any]=1_024, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[Any]=False, lowerCamelCase : float = 0, lowerCamelCase : float = 14_000, lowerCamelCase : int = None, lowerCamelCase : str = "fusion", lowerCamelCase : str = "repeatpad", **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase, sampling_rate=lowerCamelCase, padding_value=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
lowercase__ = top_db
lowercase__ = truncation
lowercase__ = padding
lowercase__ = fft_window_size
lowercase__ = (fft_window_size >> 1) + 1
lowercase__ = hop_length
lowercase__ = max_length_s
lowercase__ = max_length_s * sampling_rate
lowercase__ = sampling_rate
lowercase__ = frequency_min
lowercase__ = frequency_max
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm=lowerCamelCase, mel_scale='''htk''', )
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm='''slaney''', mel_scale='''slaney''', )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowercase__ ( self : Optional[int], lowerCamelCase : np.array, lowerCamelCase : Optional[np.array] = None ):
'''simple docstring'''
lowercase__ = spectrogram(
lowerCamelCase, window_function(self.fft_window_size, '''hann''' ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=lowerCamelCase, log_mel='''dB''', )
return log_mel_spectrogram.T
def lowercase__ ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
# randomly choose index for each part
lowercase__ = np.random.choice(ranges[0] )
lowercase__ = np.random.choice(ranges[1] )
lowercase__ = np.random.choice(ranges[2] )
lowercase__ = mel[idx_front : idx_front + chunk_frames, :]
lowercase__ = mel[idx_middle : idx_middle + chunk_frames, :]
lowercase__ = mel[idx_back : idx_back + chunk_frames, :]
lowercase__ = torch.tensor(mel[None, None, :] )
lowercase__ = torch.nn.functional.interpolate(
lowerCamelCase, size=[chunk_frames, 64], mode='''bilinear''', align_corners=lowerCamelCase )
lowercase__ = mel_shrink[0][0].numpy()
lowercase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
return mel_fusion
def lowercase__ ( self : List[str], lowerCamelCase : np.array, lowerCamelCase : int, lowerCamelCase : Dict, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowercase__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowercase__ = len(lowerCamelCase ) - max_length
lowercase__ = np.random.randint(0, overflow + 1 )
lowercase__ = waveform[idx : idx + max_length]
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowercase__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowercase__ = np.stack([mel, mel, mel, mel], axis=0 )
lowercase__ = False
else:
lowercase__ = self._random_mel_fusion(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
lowercase__ = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, lowerCamelCase ) )
lowercase__ = np.pad(lowerCamelCase, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0 )
if truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
else:
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any], lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], lowerCamelCase : str = None, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, **lowerCamelCase : List[str], ):
'''simple docstring'''
lowercase__ = truncation if truncation is not None else self.truncation
lowercase__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase__ = isinstance(lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase, np.ndarray ):
lowercase__ = np.asarray(lowerCamelCase, dtype=np.floataa )
elif isinstance(lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray(lowerCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
lowercase__ = [
self._get_input_mel(lowerCamelCase, max_length if max_length else self.nb_max_samples, lowerCamelCase, lowerCamelCase )
for waveform in raw_speech
]
lowercase__ = []
lowercase__ = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase )
is_longer.append(lowerCamelCase )
if truncation == "fusion" and sum(lowerCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowercase__ = np.random.randint(0, len(lowerCamelCase ) )
lowercase__ = True
if isinstance(input_mel[0], lowerCamelCase ):
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowercase__ = [[longer] for longer in is_longer]
lowercase__ = {'''input_features''': input_mel, '''is_longer''': is_longer}
lowercase__ = BatchFeature(lowerCamelCase )
if return_tensors is not None:
lowercase__ = input_features.convert_to_tensors(lowerCamelCase )
return input_features
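# --- Illustrative companion sketch (added for clarity; not in the original file) ---
# The "repeatpad" branch above tiles a short waveform as many whole times as
# fits, then zero-pads the remainder up to max_length. The same logic in
# isolation:
import numpy as np

def repeatpad(waveform: np.ndarray, max_length: int) -> np.ndarray:
    n_repeat = max_length // len(waveform)
    tiled = np.tile(waveform, n_repeat)
    return np.pad(tiled, (0, max_length - len(tiled)), mode="constant", constant_values=0)

assert repeatpad(np.array([1.0, 2.0, 3.0]), 8).tolist() == [1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 0.0, 0.0]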
| 671 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _UpperCAmelCase ( A__ ,A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = StableUnCLIPImgaImgPipeline
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase__ = frozenset([] )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = 32
lowercase__ = embedder_hidden_size
# image encoding components
lowercase__ = CLIPImageProcessor(crop_size=32, size=32 )
torch.manual_seed(0 )
lowercase__ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase, projection_dim=lowerCamelCase, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
# regular denoising components
torch.manual_seed(0 )
lowercase__ = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
lowercase__ = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
lowercase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowercase__ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, ) )
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowerCamelCase, layers_per_block=1, upcast_attention=lowerCamelCase, use_linear_projection=lowerCamelCase, )
torch.manual_seed(0 )
lowercase__ = DDIMScheduler(
beta_schedule='''scaled_linear''', beta_start=0.00085, beta_end=0.012, prediction_type='''v_prediction''', set_alpha_to_one=lowerCamelCase, steps_offset=1, )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL()
lowercase__ = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def lowercase__ ( self : str, lowerCamelCase : List[str], lowerCamelCase : Any=0, lowerCamelCase : Optional[int]=True ):
'''simple docstring'''
if str(lowerCamelCase ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(lowerCamelCase )
else:
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowercase__ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if pil_image:
lowercase__ = input_image * 0.5 + 0.5
lowercase__ = input_image.clamp(0, 1 )
lowercase__ = input_image.cpu().permute(0, 2, 3, 1 ).float().numpy()
lowercase__ = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableUnCLIPImgaImgPipeline(**lowerCamelCase )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
inputs.update({'''image_embeds''': None} )
lowercase__ = sd_pipe(**lowerCamelCase ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Dict ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
lowercase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
lowercase__ = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-l-img2img''', torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase__ = pipe(lowerCamelCase, '''anime turtle''', generator=lowerCamelCase, output_type='''np''' )
lowercase__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
lowercase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
lowercase__ = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase__ = pipe(lowerCamelCase, '''anime turtle''', generator=lowerCamelCase, output_type='''np''' )
lowercase__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
lowercase__ = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ = pipe(
lowerCamelCase, '''anime turtle''', num_inference_steps=2, output_type='''np''', )
lowercase__ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = None
lowercase__ = None
def a ( ):
'''simple docstring'''
lowercase__ = Node(1 )
lowercase__ = Node(2 )
lowercase__ = Node(3 )
lowercase__ = Node(4 )
lowercase__ = Node(5 )
return tree
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ ):
'''simple docstring'''
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(lowerCamelCase_ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 0
return output
def a ( ): # Main function for testing.
'''simple docstring'''
lowercase__ = make_tree()
print(F"""In-order Traversal: {inorder(lowerCamelCase_ )}""" )
print(F"""Pre-order Traversal: {preorder(lowerCamelCase_ )}""" )
print(F"""Post-order Traversal: {postorder(lowerCamelCase_ )}""" , '''\n''' )
print(F"""Height of Tree: {height(lowerCamelCase_ )}""" , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(lowerCamelCase_ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(lowerCamelCase_ ) + 1 ):
print(F"""Level {level}:""" , get_nodes_from_left_to_right(lowerCamelCase_ , level=lowerCamelCase_ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 671 | 1 |
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert column_title.isupper()
lowercase__ = 0
lowercase__ = len(lowerCamelCase_ ) - 1
lowercase__ = 0
while index >= 0:
lowercase__ = (ord(column_title[index] ) - 64) * pow(26 , lowerCamelCase_ )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
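# --- Illustrative companion sketch (added for clarity; not in the original file) ---
# Column titles are bijective base-26 numerals: "A" -> 1, "Z" -> 26,
# "AA" -> 27, "AB" -> 28, "ZZ" -> 702. The loop above accumulates the same
# value right-to-left; a left-to-right restatement:
def excel_title_to_column(title: str) -> int:
    number = 0
    for char in title:
        number = number * 26 + (ord(char) - ord("A") + 1)
    return number

assert excel_title_to_column("AB") == 28
assert excel_title_to_column("ZZ") == 702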
| 671 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = DistilBertTokenizer
lowercase__ = DistilBertTokenizerFast
lowercase__ = True
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
lowercase__ = tokenizer.encode('''sequence builders''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''multi-sequence build''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 671 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : Optional[int] = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
A__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 |
from __future__ import annotations
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
if resistor <= 0:
lowercase__ = F"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(lowerCamelCase_ )
first_sum += 1 / float(lowerCamelCase_ )
index += 1
return 1 / first_sum
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase__ = F"""Resistor at index {index} has a negative value!"""
raise ValueError(lowerCamelCase_ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
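# --- Illustrative companion sketch (added for clarity; not in the original file) ---
# Note that both helpers above share the name `a`, so the series version
# shadows the parallel one. De-obfuscated, the two formulas are:
def resistor_parallel(resistors: list[float]) -> float:
    # 1 / R_total = sum of 1 / R_i; every R_i must be positive.
    return 1 / sum(1 / r for r in resistors)

def resistor_series(resistors: list[float]) -> float:
    # R_total = sum of R_i
    return sum(resistors)

assert resistor_parallel([2.0, 2.0]) == 1.0
assert resistor_series([2.0, 3.0, 4.0]) == 9.0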
| 671 | 1 |
import functools
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
# Validation
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or not all(isinstance(lowerCamelCase_ , lowerCamelCase_ ) for day in days ):
raise ValueError('''The parameter days should be a list of integers''' )
if len(lowerCamelCase_ ) != 3 or not all(isinstance(lowerCamelCase_ , lowerCamelCase_ ) for cost in costs ):
raise ValueError('''The parameter costs should be a list of three integers''' )
if len(lowerCamelCase_ ) == 0:
return 0
if min(lowerCamelCase_ ) <= 0:
raise ValueError('''All days elements should be greater than 0''' )
if max(lowerCamelCase_ ) >= 366:
raise ValueError('''All days elements should be less than 366''' )
lowercase__ = set(lowerCamelCase_ )
@functools.cache
def dynamic_programming(lowerCamelCase_ ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
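# --- Illustrative companion sketch (added for clarity; not in the original file) ---
# The recurrence above is the classic minimum-cost-for-tickets DP: at each
# travel day pick the cheapest of a 1-, 7-, or 30-day pass. De-obfuscated:
import functools

def min_ticket_cost(days: list[int], costs: list[int]) -> int:
    day_set = set(days)
    last_day = max(days)

    @functools.cache
    def solve(day: int) -> int:
        if day > last_day:
            return 0
        if day not in day_set:
            return solve(day + 1)
        return min(
            costs[0] + solve(day + 1),  # 1-day pass
            costs[1] + solve(day + 7),  # 7-day pass
            costs[2] + solve(day + 30),  # 30-day pass
        )

    return solve(1)

# 1-day pass on day 1, 7-day pass covering days 4-8, 1-day pass on day 20.
assert min_ticket_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11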
| 671 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
lowercase__ = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('''RGB''' )
lowercase__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
lowercase__ = transform(lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ )
return image
def a ( lowerCamelCase_ ):
'''simple docstring'''
if "visual_encoder" in key:
lowercase__ = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , lowerCamelCase_ )
if "blocks" in key:
lowercase__ = re.sub(r'''blocks''' , '''layers''' , lowerCamelCase_ )
if "attn" in key:
lowercase__ = re.sub(r'''attn''' , '''self_attn''' , lowerCamelCase_ )
if "norm1" in key:
lowercase__ = re.sub(r'''norm1''' , '''layer_norm1''' , lowerCamelCase_ )
if "norm2" in key:
lowercase__ = re.sub(r'''norm2''' , '''layer_norm2''' , lowerCamelCase_ )
if "encoder.norm" in key:
lowercase__ = re.sub(r'''encoder.norm''' , '''post_layernorm''' , lowerCamelCase_ )
if "encoder.patch_embed.proj" in key:
lowercase__ = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , lowerCamelCase_ )
if "encoder.pos_embed" in key:
lowercase__ = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , lowerCamelCase_ )
if "encoder.cls_token" in key:
lowercase__ = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , lowerCamelCase_ )
if "self_attn" in key:
lowercase__ = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , lowerCamelCase_ )
return key
@torch.no_grad()
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
if config_path is not None:
lowercase__ = BlipConfig.from_pretrained(lowerCamelCase_ )
else:
lowercase__ = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
lowercase__ = BlipForConditionalGeneration(lowerCamelCase_ ).eval()
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
lowercase__ = blip_decoder(pretrained=lowerCamelCase_ , image_size=384 , vit='''base''' )
lowercase__ = pt_model.eval()
lowercase__ = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
hf_model.load_state_dict(lowerCamelCase_ )
lowercase__ = 384
lowercase__ = load_demo_image(image_size=lowerCamelCase_ , device='''cpu''' )
lowercase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase__ = tokenizer(['''a picture of'''] ).input_ids
lowercase__ = hf_model.generate(lowerCamelCase_ , lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowercase__ = hf_model.generate(lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCamelCase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase__ = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
lowercase__ = blip_vqa(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
vqa_model.eval()
lowercase__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForQuestionAnswering(lowerCamelCase_ )
hf_vqa_model.load_state_dict(lowerCamelCase_ )
lowercase__ = ['''How many dogs are in this image?''']
lowercase__ = tokenizer(lowerCamelCase_ , return_tensors='''pt''' ).input_ids
lowercase__ = hf_vqa_model.generate(lowerCamelCase_ , lowerCamelCase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
lowercase__ = blip_itm(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
itm_model.eval()
lowercase__ = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForImageTextRetrieval(lowerCamelCase_ )
lowercase__ = ['''A picture of a woman with a dog sitting in a beach''']
lowercase__ = tokenizer(
lowerCamelCase_ , return_tensors='''pt''' , padding='''max_length''' , truncation=lowerCamelCase_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(lowerCamelCase_ )
hf_itm_model.eval()
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
A__ : List[Any] = parser.parse_args()
convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
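# --- Illustrative usage (added for clarity; not part of the original script) ---
# With the two flags defined above (the file name is a placeholder):
#   python convert_blip_original_to_hf.py --pytorch_dump_folder_path ./blip-hf --config_path blip_config.json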
| 671 | 1 |
from collections.abc import Callable
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = a
lowercase__ = b
if function(lowerCamelCase_ ) == 0: # one of the a or b is a root for the function
return a
elif function(lowerCamelCase_ ) == 0:
return b
elif (
function(lowerCamelCase_ ) * function(lowerCamelCase_ ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('''could not find root in given interval.''' )
else:
lowercase__ = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(lowerCamelCase_ ) == 0:
return mid
elif function(lowerCamelCase_ ) * function(lowerCamelCase_ ) < 0:
lowercase__ = mid
else:
lowercase__ = mid
lowercase__ = start + (end - start) / 2.0
return mid
def a ( lowerCamelCase_ ):
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
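# --- Illustrative companion sketch (added for clarity; not in the original file) ---
# Bisection keeps halving [start, end] while the function changes sign across
# the interval, so any continuous function works. A compact restatement,
# approximating sqrt(2) as the positive root of x^2 - 2:
from collections.abc import Callable

def bisect_root(f: Callable[[float], float], lo: float, hi: float, tol: float = 1e-7) -> float:
    if f(lo) * f(hi) > 0:
        raise ValueError("could not find root in given interval.")
    while hi - lo > tol:
        mid = (lo + hi) / 2.0
        if f(lo) * f(mid) <= 0:
            hi = mid  # the sign change lies in the left half
        else:
            lo = mid
    return (lo + hi) / 2.0

assert abs(bisect_root(lambda x: x * x - 2, 1.0, 2.0) - 2**0.5) < 1e-6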
| 671 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Any, lowerCamelCase : Tuple=7, lowerCamelCase : str=3, lowerCamelCase : Tuple=18, lowerCamelCase : int=30, lowerCamelCase : Tuple=400, lowerCamelCase : Any=True, lowerCamelCase : Any=None, lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=None, ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''shortest_edge''': 20}
lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
def lowercase__ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''crop_size''' ) )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Any ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
| 671 | 1 |
import argparse
from collections import defaultdict
import yaml
A__ : Union[str, Any] = 'docs/source/en/_toctree.yml'
def a ( lowerCamelCase_ ):
'''simple docstring'''
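    # Clean one table-of-content list: deduplicate entries that share the same 'local' path,
    # sort the rest alphabetically, and keep the "Overview" page first.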
lowercase__ = defaultdict(lowerCamelCase_ )
lowercase__ = []
lowercase__ = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
else:
new_doc_list.append(lowerCamelCase_ )
lowercase__ = new_doc_list
lowercase__ = [key for key, value in counts.items() if value > 1]
lowercase__ = []
for duplicate_key in duplicates:
lowercase__ = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
if len(lowerCamelCase_ ) > 1:
raise ValueError(
F"""{duplicate_key} is present several times in the documentation table of content at """
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add keys that are not duplicated
new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
    # Sort alphabetically by lowercased title
    lowercase__ = sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : s["title"].lower() )
    # "overview" gets special treatment and is always first
    if len(lowerCamelCase_ ) > 1:
        raise ValueError('''Found more than one \'overview\' doc in the table of content, which is not allowed.''' )
    overview_doc.extend(lowerCamelCase_ )
    return overview_doc
def a ( lowerCamelCase_=False ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = yaml.safe_load(f.read() )
# Get to the API doc
lowercase__ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase__ = content[api_idx]['''sections''']
# Then to the model doc
lowercase__ = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
lowercase__ = api_doc[scheduler_idx]['''sections''']
lowercase__ = clean_doc_toc(lowerCamelCase_ )
lowercase__ = False
if new_scheduler_doc != scheduler_doc:
lowercase__ = True
if overwrite:
lowercase__ = new_scheduler_doc
if diff:
if overwrite:
lowercase__ = api_doc
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(lowerCamelCase_ , allow_unicode=lowerCamelCase_ ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
def a ( lowerCamelCase_=False ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = yaml.safe_load(f.read() )
# Get to the API doc
lowercase__ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase__ = content[api_idx]['''sections''']
# Then to the model doc
lowercase__ = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
lowercase__ = False
lowercase__ = api_doc[pipeline_idx]['''sections''']
lowercase__ = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
lowercase__ = pipeline_doc['''section''']
lowercase__ = clean_doc_toc(lowerCamelCase_ )
if overwrite:
lowercase__ = new_sub_pipeline_doc
new_pipeline_docs.append(lowerCamelCase_ )
# sort overall pipeline doc
lowercase__ = clean_doc_toc(lowerCamelCase_ )
if new_pipeline_docs != pipeline_docs:
lowercase__ = True
if overwrite:
lowercase__ = new_pipeline_docs
if diff:
if overwrite:
lowercase__ = api_doc
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(lowerCamelCase_ , allow_unicode=lowerCamelCase_ ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
A__ : List[str] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
A__ : Optional[int] = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 671 |
import argparse
import os
import re
A__ : Optional[int] = 'src/transformers'
# Pattern that looks at the indentation in a line.
A__ : Union[str, Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
A__ : List[str] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A__ : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
A__ : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A__ : Tuple = re.compile(r'\[([^\]]+)\]')
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _re_indent.search(lowerCamelCase_ )
return "" if search is None else search.groups()[0]
def a ( lowerCamelCase_ , lowerCamelCase_="" , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
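    # Split `code` into blocks of the given indent level, optionally keeping everything before
    # `start_prompt` and after `end_prompt` as their own blocks.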
lowercase__ = 0
lowercase__ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase_ ):
index += 1
lowercase__ = ['''\n'''.join(lines[:index] )]
else:
lowercase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ = [lines[index]]
index += 1
while index < len(lowerCamelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCamelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCamelCase_ ) )
if index < len(lowerCamelCase_ ) - 1:
lowercase__ = [lines[index + 1]]
index += 1
else:
lowercase__ = []
else:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
lowercase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase_ ) > 0:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase_ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def a ( lowerCamelCase_ ):
'''simple docstring'''
def _inner(lowerCamelCase_ ):
return key(lowerCamelCase_ ).lower().replace('''_''' , '''''' )
return _inner
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
def noop(lowerCamelCase_ ):
return x
if key is None:
lowercase__ = noop
# Constants are all uppercase, they go first.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ )[0].isupper() and not key(lowerCamelCase_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase__ = [obj for obj in objects if not key(lowerCamelCase_ )[0].isupper()]
lowercase__ = ignore_underscore(lowerCamelCase_ )
return sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
    # This inner function sorts imports between [ ].
def _replace(lowerCamelCase_ ):
lowercase__ = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] ) + "]"
lowercase__ = import_statement.split('''\n''' )
if len(lowerCamelCase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase__ = 2 if lines[1].strip() == '''[''' else 1
lowercase__ = [(i, _re_strip_line.search(lowerCamelCase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowercase__ = sort_objects(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )
lowercase__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCamelCase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase__ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
lowercase__ = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] )
return "\n".join(lowerCamelCase_ )
else:
# Finally we have to deal with imports fitting on one line
lowercase__ = _re_bracket_content.sub(_replace , lowerCamelCase_ )
return import_statement
def a ( lowerCamelCase_ , lowerCamelCase_=True ):
'''simple docstring'''
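    # Sort the imports declared in the `_import_structure` of the given init file; with
    # `check_only=True` the file is left untouched and `True` is returned if it would change.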
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase__ = split_code_in_indented_blocks(
lowerCamelCase_ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCamelCase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowercase__ = main_blocks[block_idx]
lowercase__ = block.split('''\n''' )
# Get to the start of the imports.
lowercase__ = 0
while line_idx < len(lowerCamelCase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowercase__ = len(lowerCamelCase_ )
else:
line_idx += 1
if line_idx >= len(lowerCamelCase_ ):
continue
        # Ignore the first and last lines: they don't contain anything.
lowercase__ = '''\n'''.join(block_lines[line_idx:-1] )
lowercase__ = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowercase__ = split_code_in_indented_blocks(lowerCamelCase_ , indent_level=lowerCamelCase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase__ = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowercase__ = [(pattern.search(lowerCamelCase_ ).groups()[0] if pattern.search(lowerCamelCase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase__ = [(i, key) for i, key in enumerate(lowerCamelCase_ ) if key is not None]
lowercase__ = [x[0] for x in sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )]
        # We reorder the blocks, leaving empty lines/comments as they were and reordering the rest.
lowercase__ = 0
lowercase__ = []
for i in range(len(lowerCamelCase_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowercase__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCamelCase_ )
count += 1
# And we put our main block back together with its first and last line.
lowercase__ = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCamelCase_ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCamelCase_ ) )
def a ( lowerCamelCase_=True ):
'''simple docstring'''
lowercase__ = []
for root, _, files in os.walk(lowerCamelCase_ ):
if "__init__.py" in files:
lowercase__ = sort_imports(os.path.join(lowerCamelCase_ , '''__init__.py''' ) , check_only=lowerCamelCase_ )
if result:
lowercase__ = [os.path.join(lowerCamelCase_ , '''__init__.py''' )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(F"""Would overwrite {len(lowerCamelCase_ )} files, run `make style`.""" )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
A__ : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 671 | 1 |
from PIL import Image
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = (259 * (level + 255)) / (255 * (259 - level))
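    # standard contrast-correction factor: `level` 0 gives factor 1.0 (no change)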
def contrast(lowerCamelCase_ ) -> int:
return int(128 + factor * (c - 128) )
return img.point(lowerCamelCase_ )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
A__ : Optional[int] = change_contrast(img, 1_70)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 671 |
from math import sqrt
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
lowercase__ = True
# 0 and 1 are none primes.
if number <= 1:
lowercase__ = False
for divisor in range(2 , int(round(sqrt(lowerCamelCase_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowercase__ = False
break
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'status' must been from type bool"
return status
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowercase__ = list(range(2 , n + 1 ) )
    lowercase__ = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(lowerCamelCase_ ) ):
for j in range(i + 1 , len(lowerCamelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase__ = 0
# filters actual prime numbers.
lowercase__ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must be an int and > 2"
lowercase__ = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCamelCase_ ):
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and number >= 0, "'number' must be an int and >= 0"
    lowercase__ = [] # this list will be returned by the function.
# potential prime number factors.
lowercase__ = 2
lowercase__ = number
if number == 0 or number == 1:
ans.append(lowerCamelCase_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCamelCase_ ):
while quotient != 1:
if is_prime(lowerCamelCase_ ) and (quotient % factor == 0):
ans.append(lowerCamelCase_ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = max(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = min(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowerCamelCase_ ), "compare bust been from type bool"
return number % 2 == 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowerCamelCase_ ), "compare bust been from type bool"
return number % 2 != 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (number > 2) and is_even(lowerCamelCase_ )
), "'number' must been an int, even and > 2"
lowercase__ = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowercase__ = get_prime_numbers(lowerCamelCase_ )
lowercase__ = len(lowerCamelCase_ )
# run variable for while-loops.
lowercase__ = 0
lowercase__ = None
# exit variable. for break up the loops
lowercase__ = True
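    # try every pair of primes (i, j), i < j, until one pair sums to 'number' (its Goldbach pair)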
while i < len_pn and loop:
lowercase__ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase__ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (len(lowerCamelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 0
while numbera != 0:
lowercase__ = numbera % numbera
lowercase__ = numbera
lowercase__ = rest
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
    lowercase__ = 1 # actual answer that will be returned.
    # for lcm(x, 1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = prime_factorization(lowerCamelCase_ )
elif numbera == 1 or numbera == 1:
lowercase__ = []
lowercase__ = []
lowercase__ = max(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = 0
lowercase__ = 0
    lowercase__ = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(max(lowerCamelCase_ , lowerCamelCase_ ) ):
ans *= n
else:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'number' must been a positive int"
lowercase__ = 0
lowercase__ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCamelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and is_prime(
lowerCamelCase_ ), "'ans' must been a prime number and from type int"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
is_prime(lowerCamelCase_ ) and is_prime(lowerCamelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase__ = p_number_a + 1 # jump to the next number
    lowercase__ = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCamelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 1), "'n' must be an int and >= 1"
lowercase__ = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase_ )
# precondition
    assert ans[0] == 1 and ans[len(lowerCamelCase_ ) - 1] == n, "Error in function getDivisors(...)"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase__ = get_divisors(lowerCamelCase_ )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase__ = gcd(abs(lowerCamelCase_ ) , abs(lowerCamelCase_ ) )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
lowercase__ = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
lowercase__ = 0
lowercase__ = 1
    lowercase__ = 1 # this will be returned
for _ in range(n - 1 ):
lowercase__ = ans
ans += fiba
lowercase__ = tmp
return ans
| 671 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
A__ : List[Any] = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
A__ : Tuple = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
A__ : Any = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': datasets.Value('''string''', id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''', id='''sequence''' ), id='''references''' ),
} ), codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''], reference_urls=[
'''https://github.com/m-popovic/chrF''',
], )
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : int, lowerCamelCase : int = CHRF.CHAR_ORDER, lowerCamelCase : int = CHRF.WORD_ORDER, lowerCamelCase : int = CHRF.BETA, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : bool = False, ):
'''simple docstring'''
lowercase__ = len(references[0] )
if any(len(lowerCamelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
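        # sacrebleu expects one list per reference set, so transpose the per-prediction references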
lowercase__ = [[refs[i] for refs in references] for i in range(lowerCamelCase )]
lowercase__ = CHRF(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = sb_chrf.corpus_score(lowerCamelCase, lowerCamelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 671 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = args.log_outputs
lowercase__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
lowercase__ = load_metric('''wer''' )
lowercase__ = load_metric('''cer''' )
# compute metrics
lowercase__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
lowercase__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
lowercase__ = F"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCamelCase_ )
with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowerCamelCase_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowercase__ = F"""log_{dataset_id}_predictions.txt"""
lowercase__ = F"""log_{dataset_id}_targets.txt"""
with open(lowerCamelCase_ , '''w''' ) as p, open(lowerCamelCase_ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCamelCase_ , lowerCamelCase_ ):
p.write(F"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(F"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCamelCase_ , with_indices=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowercase__ = re.sub(lowerCamelCase_ , '''''' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing newline characters etc.
# note that order is important here!
lowercase__ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
lowercase__ = ''' '''.join(text.split(lowerCamelCase_ ) )
return text
def a ( lowerCamelCase_ ):
'''simple docstring'''
# load dataset
lowercase__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCamelCase_ )
    # for testing: only process the first few examples, e.g.
    # dataset = dataset.select(range(10))
# load processor
lowercase__ = AutoFeatureExtractor.from_pretrained(args.model_id )
lowercase__ = feature_extractor.sampling_rate
# resample audio
lowercase__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowerCamelCase_ ) )
# load eval pipeline
if args.device is None:
lowercase__ = 0 if torch.cuda.is_available() else -1
lowercase__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCamelCase_ ):
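        # transcribe the raw waveform, chunking long audio when chunk/stride lengths are set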
lowercase__ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowercase__ = prediction['''text''']
lowercase__ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
lowercase__ = dataset.map(lowerCamelCase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
        '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to None (no chunking).'
)
parser.add_argument(
        '--stride_length_s', type=float, default=None, help='Stride of the audio chunks, in seconds. Defaults to None.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
A__ : Union[str, Any] = parser.parse_args()
main(args)
| 671 | 1 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
A__ : List[Any] = logging.get_logger(__name__)
A__ : List[Any] = ['model.decoder.embed_positions.weights']
def a ( lowerCamelCase_ ):
'''simple docstring'''
if "emb" in name:
lowercase__ = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
lowercase__ = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
lowercase__ = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
lowercase__ = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
lowercase__ = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
lowercase__ = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
lowercase__ = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
lowercase__ = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
lowercase__ = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
lowercase__ = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
lowercase__ = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = list(state_dict.keys() )
lowercase__ = {}
for key in keys:
lowercase__ = state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_keys(lowerCamelCase_ )
if "in_proj_weight" in key:
# split fused qkv proj
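            # the fused in_proj weight stacks [query; key; value] along dim 0, so slice it into thirds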
lowercase__ = val[:hidden_size, :]
lowercase__ = val[hidden_size : 2 * hidden_size, :]
lowercase__ = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
lowercase__ = val
else:
lowercase__ = val
return state_dict, enc_dec_proj_state_dict
def a ( lowerCamelCase_ ):
'''simple docstring'''
if checkpoint == "small":
# default config values
lowercase__ = 1024
lowercase__ = 24
lowercase__ = 16
elif checkpoint == "medium":
lowercase__ = 1536
lowercase__ = 48
lowercase__ = 24
elif checkpoint == "large":
lowercase__ = 2048
lowercase__ = 48
lowercase__ = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
lowercase__ = MusicgenDecoderConfig(
hidden_size=lowerCamelCase_ , ffn_dim=hidden_size * 4 , num_hidden_layers=lowerCamelCase_ , num_attention_heads=lowerCamelCase_ , )
return config
@torch.no_grad()
def a ( lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_="cpu" ):
'''simple docstring'''
lowercase__ = MusicGen.get_pretrained(lowerCamelCase_ , device=lowerCamelCase_ )
lowercase__ = decoder_config_from_checkpoint(lowerCamelCase_ )
lowercase__ = fairseq_model.lm.state_dict()
lowercase__ , lowercase__ = rename_state_dict(
lowerCamelCase_ , hidden_size=decoder_config.hidden_size )
lowercase__ = TaEncoderModel.from_pretrained('''t5-base''' )
lowercase__ = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
lowercase__ = MusicgenForCausalLM(lowerCamelCase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
lowercase__ , lowercase__ = decoder.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(lowerCamelCase_ ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
lowercase__ = MusicgenForConditionalGeneration(text_encoder=lowerCamelCase_ , audio_encoder=lowerCamelCase_ , decoder=lowerCamelCase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowerCamelCase_ )
# check we can do a forward pass
lowercase__ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
lowercase__ = input_ids.reshape(2 * 4 , -1 )
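    # dummy decoder tokens: the (batch, num_codebooks) dims are flattened into the first axis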
with torch.no_grad():
lowercase__ = model(input_ids=lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
lowercase__ = AutoTokenizer.from_pretrained('''t5-base''' )
lowercase__ = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
lowercase__ = MusicgenProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_ )
# set the appropriate bos/pad token ids
lowercase__ = 2048
lowercase__ = 2048
# set other default generation config params
lowercase__ = int(30 * audio_encoder.config.frame_rate )
lowercase__ = True
lowercase__ = 3.0
if pytorch_dump_folder is not None:
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(lowerCamelCase_ )
processor.push_to_hub(lowerCamelCase_ )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
A__ : Optional[int] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 671 |
from functools import reduce
A__ : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def a ( lowerCamelCase_ = N ):
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCamelCase_ , lowerCamelCase_ : str(int(lowerCamelCase_ ) * int(lowerCamelCase_ ) ) , n[i : i + 13] ) )
for i in range(len(lowerCamelCase_ ) - 12 ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , ):
'''simple docstring'''
lowercase__ = {}
if train_file is not None:
lowercase__ = [train_file]
if eval_file is not None:
lowercase__ = [eval_file]
if test_file is not None:
lowercase__ = [test_file]
lowercase__ = datasets.load_dataset('''csv''' , data_files=lowerCamelCase_ )
lowercase__ = list(ds[list(files.keys() )[0]].features.keys() )
lowercase__ = features_name.pop(lowerCamelCase_ )
lowercase__ = list(set(ds[list(files.keys() )[0]][label_name] ) )
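    # map every distinct label value to an integer id for the classification head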
lowercase__ = {label: i for i, label in enumerate(lowerCamelCase_ )}
lowercase__ = tokenizer.model_input_names
lowercase__ = {}
if len(lowerCamelCase_ ) == 1:
for k in files.keys():
lowercase__ = ds[k].map(
lambda lowerCamelCase_ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' ) , batched=lowerCamelCase_ , )
elif len(lowerCamelCase_ ) == 2:
for k in files.keys():
lowercase__ = ds[k].map(
lambda lowerCamelCase_ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' , ) , batched=lowerCamelCase_ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
lowercase__ = {k: v for k, v in ex.items() if k in input_names}
lowercase__ = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
lowercase__ = {k: v for k, v in ex.items() if k in input_names}
lowercase__ = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
lowercase__ = {k: v for k, v in ex.items() if k in input_names}
lowercase__ = labelaid[ex[label_name]]
yield (d, label)
lowercase__ = (
tf.data.Dataset.from_generator(
lowerCamelCase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
lowercase__ = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
lowercase__ = (
tf.data.Dataset.from_generator(
lowerCamelCase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
lowercase__ = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
lowercase__ = (
tf.data.Dataset.from_generator(
lowerCamelCase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
lowercase__ = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
A__ : List[Any] = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = field(metadata={"""help""": """Which column contains the label"""} )
lowercase__ = field(default=A__ ,metadata={"""help""": """The path of the training file"""} )
lowercase__ = field(default=A__ ,metadata={"""help""": """The path of the development file"""} )
lowercase__ = field(default=A__ ,metadata={"""help""": """The path of the test file"""} )
lowercase__ = field(
default=128 ,metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} ,)
lowercase__ = field(
default=A__ ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowercase__ = field(
default=A__ ,metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase__ = field(
default=A__ ,metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowercase__ = field(default=A__ ,metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowercase__ = field(
default=A__ ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,)
def a ( ):
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
F"""16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase__ , lowercase__ , lowercase__ , lowercase__ = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowerCamelCase_ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
lowercase__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowerCamelCase_ ) , labelaid=lowerCamelCase_ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
lowercase__ = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
def compute_metrics(lowerCamelCase_ ) -> Dict:
lowercase__ = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
lowercase__ = TFTrainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase__ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase__ = trainer.evaluate()
lowercase__ = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(lowerCamelCase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
results.update(lowerCamelCase_ )
return results
if __name__ == "__main__":
main()
| 671 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase ( A__ ,A__ ):
"""simple docstring"""
lowercase__ = 1
@register_to_config
def __init__( self : Union[str, Any], lowerCamelCase : int = 2_000, lowerCamelCase : float = 0.15, lowerCamelCase : float = 0.01, lowerCamelCase : float = 1348.0, lowerCamelCase : float = 1E-5, lowerCamelCase : int = 1, ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ = sigma_max
# setable values
lowercase__ = None
self.set_sigmas(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[int] = None ):
'''simple docstring'''
return sample
def lowercase__ ( self : Dict, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : Union[str, torch.device] = None ):
'''simple docstring'''
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowercase__ = torch.linspace(1, lowerCamelCase, lowerCamelCase, device=lowerCamelCase )
def lowercase__ ( self : str, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : float = None, lowerCamelCase : float = None ):
'''simple docstring'''
lowercase__ = sigma_min if sigma_min is not None else self.config.sigma_min
lowercase__ = sigma_max if sigma_max is not None else self.config.sigma_max
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCamelCase, lowerCamelCase )
lowercase__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowercase__ = torch.exp(torch.linspace(math.log(lowerCamelCase ), math.log(lowerCamelCase ), lowerCamelCase ) )
lowercase__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : str ):
'''simple docstring'''
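        # sigma at the previous timestep (index - 1); zero at the very first timestep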
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
def lowercase__ ( self : Tuple, lowerCamelCase : torch.FloatTensor, lowerCamelCase : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowercase__ = timestep * torch.ones(
sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowercase__ = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be on the same device as the tensor they index, so move them to discrete_sigmas' device (cpu by default with cuda)
lowercase__ = timesteps.to(self.discrete_sigmas.device )
lowercase__ = self.discrete_sigmas[timesteps].to(sample.device )
lowercase__ = self.get_adjacent_sigma(lowerCamelCase, lowerCamelCase ).to(sample.device )
lowercase__ = torch.zeros_like(lowerCamelCase )
lowercase__ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowercase__ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowercase__ = diffusion.unsqueeze(-1 )
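        # reverse-time SDE drift: f(x, t) - g(t)**2 * score, where model_output approximates the score (f = 0 for the VE SDE)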
lowercase__ = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of the reverse SDE
lowercase__ = randn_tensor(
sample.shape, layout=sample.layout, generator=lowerCamelCase, device=sample.device, dtype=sample.dtype )
lowercase__ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowercase__ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCamelCase, prev_sample_mean=lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
# sample noise for correction
lowercase__ = randn_tensor(sample.shape, layout=sample.layout, generator=lowerCamelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowercase__ = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
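        # annealed Langevin step size: 2 * (snr * ||z|| / ||grad||) ** 2, as in the score-SDE reference implementation this file is based on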
lowercase__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowercase__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowercase__ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowercase__ = step_size.unsqueeze(-1 )
lowercase__ = sample + step_size * model_output
lowercase__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, ):
'''simple docstring'''
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowercase__ = timesteps.to(original_samples.device )
lowercase__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowercase__ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCamelCase ) * sigmas[:, None, None, None]
)
lowercase__ = noise + original_samples
return noisy_samples
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 671 | 1 |
def a ( lowerCamelCase_ ):
'''simple docstring'''
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
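    # proper divisors of n (excluding n itself) never exceed n // 2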
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 |
from collections import defaultdict
from math import gcd
def a ( lowerCamelCase_ = 150_0000 ):
'''simple docstring'''
lowercase__ = defaultdict(lowerCamelCase_ )
lowercase__ = 2
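    # Euclid's formula: for coprime m > n of opposite parity, the primitive triple (m**2 - n**2, 2mn, m**2 + n**2) has perimeter 2 * m * (m + n)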
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , lowerCamelCase_ , 2 ):
if gcd(lowerCamelCase_ , lowerCamelCase_ ) > 1:
continue
lowercase__ = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(lowerCamelCase_ , limit + 1 , lowerCamelCase_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : str, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = [[] for _ in range(lowerCamelCase )]
lowercase__ = size
def __getitem__( self : Optional[Any], lowerCamelCase : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def lowercase__ ( self : str ):
'''simple docstring'''
return self._size
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(lowerCamelCase, lowerCamelCase ) )
def lowercase__ ( self : Optional[int], lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = deque([start_vertex] )
lowercase__ = [None] * self.size
lowercase__ = 0
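        # 0-1 BFS: weight-0 edges go to the front of the deque and weight-1 edges to the back, keeping it sorted by distance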
while queue:
lowercase__ = queue.popleft()
lowercase__ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowercase__ = current_distance + edge.weight
lowercase__ = distances[edge.destination_vertex]
if (
isinstance(lowerCamelCase, lowerCamelCase )
and new_distance >= dest_vertex_distance
):
continue
lowercase__ = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
A__ : Dict = logging.get_logger(__name__)
A__ : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A__ : Optional[int] = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
A__ : List[str] = {
'bert-base-uncased': 5_12,
'bert-large-uncased': 5_12,
'bert-base-cased': 5_12,
'bert-large-cased': 5_12,
'bert-base-multilingual-uncased': 5_12,
'bert-base-multilingual-cased': 5_12,
'bert-base-chinese': 5_12,
'bert-base-german-cased': 5_12,
'bert-large-uncased-whole-word-masking': 5_12,
'bert-large-cased-whole-word-masking': 5_12,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_12,
'bert-base-cased-finetuned-mrpc': 5_12,
'bert-base-german-dbmdz-cased': 5_12,
'bert-base-german-dbmdz-uncased': 5_12,
'TurkuNLP/bert-base-finnish-cased-v1': 5_12,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_12,
'wietsedv/bert-base-dutch-cased': 5_12,
}
A__ : Optional[int] = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = BertTokenizer
def __init__( self : Any, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Any=None, lowerCamelCase : Tuple=True, lowerCamelCase : Dict="[UNK]", lowerCamelCase : Any="[SEP]", lowerCamelCase : List[Any]="[PAD]", lowerCamelCase : Optional[Any]="[CLS]", lowerCamelCase : Dict="[MASK]", lowerCamelCase : List[Any]=True, lowerCamelCase : Tuple=None, **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, tokenize_chinese_chars=lowerCamelCase, strip_accents=lowerCamelCase, **lowerCamelCase, )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
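        # rebuild the fast (Rust) normalizer if its saved state disagrees with the options requested here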
if (
normalizer_state.get('''lowercase''', lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''', lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''', lowerCamelCase ) != tokenize_chinese_chars
):
lowercase__ = getattr(lowerCamelCase, normalizer_state.pop('''type''' ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**lowerCamelCase )
lowercase__ = do_lower_case
def lowercase__ ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : Dict=None ):
'''simple docstring'''
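        # single sequence: [CLS] A [SEP]; sequence pair: [CLS] A [SEP] B [SEP]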
lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : List[Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
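        # token type ids: 0 for the "[CLS] A [SEP]" part, 1 for the optional "B [SEP]" part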
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
lowercase__ = self._tokenizer.model.save(lowerCamelCase, name=lowerCamelCase )
return tuple(lowerCamelCase )
| 671 | 1 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
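    # tie the output projection to the embedding matrix (weight sharing with the LM head)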
lowercase__ , lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(lowerCamelCase_ , lowerCamelCase_ , bias=lowerCamelCase_ )
lowercase__ = emb.weight.data
return lin_layer
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = torch.load(lowerCamelCase_ , map_location='''cpu''' )
lowercase__ = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
lowercase__ = mam_aaa['''model''']
remove_ignore_keys_(lowerCamelCase_ )
lowercase__ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowercase__ = MaMaaaConfig(
vocab_size=lowerCamelCase_ , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
lowercase__ = state_dict['''decoder.embed_tokens.weight''']
lowercase__ = MaMaaaForConditionalGeneration(lowerCamelCase_ )
model.model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
lowercase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
A__ : Optional[int] = parser.parse_args()
A__ : int = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 671 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A__ : Any = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
A__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 | 1 |
from __future__ import annotations
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
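    # extended Euclidean algorithm: returns (x, y) with a * x + b * y == gcd(a, b)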
if b == 0:
return (1, 0)
((lowercase__) , (lowercase__)) = extended_euclid(lowerCamelCase_ , a % b )
lowercase__ = a // b
return (y, x - k * y)
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
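    # Chinese Remainder Theorem: combine n % n1 == r1 and n % n2 == r2 for coprime moduli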
((lowercase__) , (lowercase__)) = extended_euclid(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
((lowercase__) , (lowercase__)) = extended_euclid(lowerCamelCase_ , lowerCamelCase_ )
if b < 0:
lowercase__ = (b % n + n) % n
return b
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ , lowercase__ = invert_modulo(lowerCamelCase_ , lowerCamelCase_ ), invert_modulo(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True)
| 671 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
A__ : Dict = 50_00_00
A__ , A__ : str = os.path.split(__file__)
A__ : Optional[Any] = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def a ( lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dataset.map(**lowerCamelCase_ )
@get_duration
def a ( lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dataset.filter(**lowerCamelCase_ )
def a ( ):
'''simple docstring'''
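    # benchmark dataset.map / dataset.filter across output formats (numpy, pandas, torch, tensorflow) and dump the timings to JSON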
lowercase__ = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
lowercase__ = generate_example_dataset(
os.path.join(lowerCamelCase_ , '''dataset.arrow''' ) , lowerCamelCase_ , num_examples=lowerCamelCase_ )
lowercase__ = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowerCamelCase_ )
def tokenize(lowerCamelCase_ ):
return tokenizer(examples['''text'''] )
lowercase__ = map(lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , batched=lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''numpy''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''pandas''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , function=lowerCamelCase_ , batched=lowerCamelCase_ )
lowercase__ = filter(lowerCamelCase_ )
    # Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowerCamelCase_ , '''wb''' ) as f:
f.write(json.dumps(lowerCamelCase_ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 671 | 1 |
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = len(lowerCamelCase_ )
lowercase__ = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # a sum of zero can always be formed by taking no elements,
    # so the first column is True for every prefix length
for i in range(arr_len + 1 ):
lowercase__ = True
    # a non-zero sum cannot be formed from the empty set, so the first row is False
for i in range(1 , required_sum + 1 ):
lowercase__ = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
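            # classic 0/1 subset-sum DP: either skip arr[i - 1], or include it when it fits in the remaining sum j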
if arr[i - 1] > j:
lowercase__ = subset[i - 1][j]
if arr[i - 1] <= j:
lowercase__ = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : str = "", lowerCamelCase : bool = False ):
'''simple docstring'''
# Mapping from the first character of the prefix of the node
lowercase__ = {}
# A node will be a leaf if the tree contains its word
lowercase__ = is_leaf
lowercase__ = prefix
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
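        # longest common prefix of the node's prefix and the word; returns (common, leftover prefix, leftover word)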
lowercase__ = 0
for q, w in zip(self.prefix, lowerCamelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def lowercase__ ( self : Optional[int], lowerCamelCase : list[str] ):
'''simple docstring'''
for word in words:
self.insert(lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : str ):
'''simple docstring'''
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
lowercase__ = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowercase__ = RadixNode(prefix=lowerCamelCase, is_leaf=lowerCamelCase )
else:
lowercase__ = self.nodes[word[0]]
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCamelCase )
            # Case 4: The word is longer than or equal to the matching prefix
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowercase__ = remaining_prefix
lowercase__ = self.nodes[matching_string[0]]
lowercase__ = RadixNode(lowerCamelCase, lowerCamelCase )
lowercase__ = aux_node
if remaining_word == "":
lowercase__ = True
else:
self.nodes[matching_string[0]].insert(lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.nodes.get(word[0], lowerCamelCase )
if not incoming_node:
return False
else:
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(lowerCamelCase )
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.nodes.get(word[0], lowerCamelCase )
if not incoming_node:
return False
else:
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCamelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowercase__ = list(self.nodes.values() )[0]
lowercase__ = merging_node.is_leaf
self.prefix += merging_node.prefix
lowercase__ = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowercase__ = False
# If there is 1 edge, we merge it with its child
else:
lowercase__ = list(incoming_node.nodes.values() )[0]
lowercase__ = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowercase__ = merging_node.nodes
return True
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int = 0 ):
'''simple docstring'''
if self.prefix != "":
print('''-''' * height, self.prefix, ''' (leaf)''' if self.is_leaf else '''''' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def a ( ):
'''simple docstring'''
lowercase__ = '''banana bananas bandana band apple all beast'''.split()
lowercase__ = RadixNode()
root.insert_many(lowerCamelCase_ )
assert all(root.find(lowerCamelCase_ ) for word in words )
assert not root.find('''bandanas''' )
assert not root.find('''apps''' )
root.delete('''all''' )
assert not root.find('''all''' )
root.delete('''banana''' )
assert not root.find('''banana''' )
assert root.find('''bananas''' )
return True
def a ( ):
'''simple docstring'''
assert test_trie()
def a ( ):
'''simple docstring'''
lowercase__ = RadixNode()
lowercase__ = '''banana bananas bandanas bandana band apple all beast'''.split()
root.insert_many(lowerCamelCase_ )
print('''Words:''' , lowerCamelCase_ )
print('''Tree:''' )
root.print_tree()
if __name__ == "__main__":
main()
| 671 | 1 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : Optional[int], lowerCamelCase : Tuple=3, lowerCamelCase : int=32, lowerCamelCase : str=3, lowerCamelCase : Tuple=10, lowerCamelCase : Dict=[10, 20, 30, 40], lowerCamelCase : List[str]=[1, 1, 2, 1], lowerCamelCase : List[Any]=True, lowerCamelCase : int=True, lowerCamelCase : Optional[Any]="relu", lowerCamelCase : Dict=3, lowerCamelCase : Optional[Any]=None, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = num_channels
lowercase__ = embeddings_size
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_act
lowercase__ = num_labels
lowercase__ = scope
lowercase__ = len(lowerCamelCase )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : str ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Dict, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = RegNetModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def lowercase__ ( self : List[str], lowerCamelCase : int, lowerCamelCase : Tuple, lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = RegNetForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
lowercase__ = (
{"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = RegNetModelTester(self )
lowercase__ = ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase )
def lowercase__ ( self : Dict ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : int ):
'''simple docstring'''
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(config=lowerCamelCase )
for name, module in model.named_modules():
if isinstance(lowerCamelCase, (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ), msg=F"""Parameter {name} of model {model_class} seems not properly initialized""", )
self.assertTrue(
torch.all(module.bias == 0 ), msg=F"""Parameter {name} of model {model_class} seems not properly initialized""", )
def lowercase__ ( self : Any ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : Dict ):
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase ), expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ = layer_type
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def lowercase__ ( self : List[str] ):
'''simple docstring'''
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = RegNetModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def a ( ):
'''simple docstring'''
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(**lowerCamelCase )
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
lowercase__ = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase, atol=1E-4 ) )
| 671 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = ViTImageProcessor if is_vision_available() else None
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = (3, 32, 128)
lowercase__ = tempfile.mkdtemp()
# fmt: off
lowercase__ = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
lowercase__ = dict(zip(lowerCamelCase, range(len(lowerCamelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase ) + '''\n''' )
lowercase__ = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
lowercase__ = os.path.join(self.tmpdirname, lowerCamelCase )
with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
json.dump(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : int, **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : str, **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )
lowercase__ = Image.fromarray(np.moveaxis(lowerCamelCase, 0, -1 ) )
return image_input
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
lowercase__ = self.get_image_processor(do_normalize=lowerCamelCase, padding_value=1.0 )
lowercase__ = MgpstrProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=lowerCamelCase, padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(lowerCamelCase, return_tensors='''np''' )
lowercase__ = processor(images=lowerCamelCase, return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = processor(text=lowerCamelCase )
lowercase__ = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.char_decode(lowerCamelCase )
lowercase__ = tokenizer.batch_decode(lowerCamelCase )
lowercase__ = [seq.replace(''' ''', '''''' ) for seq in decoded_tok]
self.assertListEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = None
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = torch.randn(1, 27, 38 )
lowercase__ = torch.randn(1, 27, 50_257 )
lowercase__ = torch.randn(1, 27, 30_522 )
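        # dummy logits for the three decoding heads: character (38), BPE (50_257) and wordpiece (30_522) vocab sizes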
lowercase__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ), ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 671 | 1 |
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 1
lowercase__ = 2
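    # the divisor count is multiplicative: for n = prod(p_i ** m_i), d(n) = prod(m_i + 1); trial-divide up to sqrt(n)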
while i * i <= n:
lowercase__ = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def a ( ):
'''simple docstring'''
lowercase__ = 1
lowercase__ = 1
while True:
i += 1
t_num += i
if count_divisors(lowerCamelCase_ ) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
| 671 |
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
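    # recursive modular exponentiation by squaring: O(log exponent) multiplications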
if exponent == 1:
return base
if exponent % 2 == 0:
lowercase__ = _modexpt(lowerCamelCase_ , exponent // 2 , lowerCamelCase_ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowerCamelCase_ , exponent - 1 , lowerCamelCase_ )) % modulo_value
def a ( lowerCamelCase_ = 1777 , lowerCamelCase_ = 1855 , lowerCamelCase_ = 8 ):
'''simple docstring'''
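    # hyperexponentiation (tetration) base^^height, keeping only the last `digits` digits at every step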
lowercase__ = base
for _ in range(1 , lowerCamelCase_ ):
lowercase__ = _modexpt(lowerCamelCase_ , lowerCamelCase_ , 10**digits )
return result
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
A__ : str = logging.get_logger()
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = field(default_factory=A__ )
lowercase__ = field(default_factory=A__ )
def lowercase__ ( self : int, lowerCamelCase : Optional[int], lowerCamelCase : Tensor, lowerCamelCase : Tensor ):
'''simple docstring'''
lowercase__ = len(list(m.modules() ) ) == 1 or isinstance(lowerCamelCase, nn.Convad ) or isinstance(lowerCamelCase, nn.BatchNormad )
if has_not_submodules:
self.traced.append(lowerCamelCase )
def __call__( self : Dict, lowerCamelCase : Tensor ):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowerCamelCase )
[x.remove() for x in self.handles]
return self
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda lowerCamelCase : len(list(x.state_dict().keys() ) ) > 0, self.traced ) )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
lowercase__ = 0
lowercase__ = field(default_factory=A__ )
lowercase__ = field(default_factory=A__ )
def __call__( self : Any, lowerCamelCase : Tensor ):
'''simple docstring'''
lowercase__ = Tracker(self.dest )(lowerCamelCase ).parametrized
lowercase__ = Tracker(self.src )(lowerCamelCase ).parametrized
lowercase__ = list(filter(lambda lowerCamelCase : type(lowerCamelCase ) not in self.src_skip, lowerCamelCase ) )
lowercase__ = list(filter(lambda lowerCamelCase : type(lowerCamelCase ) not in self.dest_skip, lowerCamelCase ) )
if len(lowerCamelCase ) != len(lowerCamelCase ):
raise Exception(
F"""Numbers of operations are different. Source module has {len(lowerCamelCase )} operations while"""
F""" destination module has {len(lowerCamelCase )}.""" )
for dest_m, src_m in zip(lowerCamelCase, lowerCamelCase ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F"""Transfered from={src_m} to={dest_m}""" )
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = True ):
'''simple docstring'''
print(F"""Converting {name}...""" )
with torch.no_grad():
lowercase__ = timm.create_model(lowerCamelCase_ , pretrained=lowerCamelCase_ ).eval()
lowercase__ = ResNetForImageClassification(lowerCamelCase_ ).eval()
lowercase__ = ModuleTransfer(src=lowerCamelCase_ , dest=lowerCamelCase_ )
lowercase__ = torch.randn((1, 3, 224, 224) )
module_transfer(lowerCamelCase_ )
assert torch.allclose(from_model(lowerCamelCase_ ) , our_model(lowerCamelCase_ ).logits ), "The model logits don't match the original one."
lowercase__ = F"""resnet{'-'.join(name.split('resnet' ) )}"""
print(lowerCamelCase_ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=lowerCamelCase_ , )
# we can use the convnext one
lowercase__ = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=lowerCamelCase_ , )
print(F"""Pushed {checkpoint_name}""" )
def a ( lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = True ):
'''simple docstring'''
lowercase__ = '''imagenet-1k-id2label.json'''
lowercase__ = 1000
lowercase__ = (1, num_labels)
lowercase__ = '''huggingface/label-files'''
lowercase__ = num_labels
lowercase__ = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = partial(lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ )
lowercase__ = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(lowerCamelCase_ , names_to_config[model_name] , lowerCamelCase_ , lowerCamelCase_ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return config, expected_shape
if __name__ == "__main__":
A__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
A__ : Optional[int] = parser.parse_args()
A__ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 671 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
A__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : WhisperForConditionalGeneration, lowerCamelCase : WhisperProcessor, lowerCamelCase : AutoencoderKL, lowerCamelCase : CLIPTextModel, lowerCamelCase : CLIPTokenizer, lowerCamelCase : UNetaDConditionModel, lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], lowerCamelCase : StableDiffusionSafetyChecker, lowerCamelCase : CLIPImageProcessor, ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=lowerCamelCase, speech_processor=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, unet=lowerCamelCase, scheduler=lowerCamelCase, feature_extractor=lowerCamelCase, )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
lowercase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase )
@torch.no_grad()
def __call__( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[Any]=16_000, lowerCamelCase : int = 512, lowerCamelCase : int = 512, lowerCamelCase : int = 50, lowerCamelCase : float = 7.5, lowerCamelCase : Optional[Union[str, List[str]]] = None, lowerCamelCase : Optional[int] = 1, lowerCamelCase : float = 0.0, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : Optional[torch.FloatTensor] = None, lowerCamelCase : Optional[str] = "pil", lowerCamelCase : bool = True, lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, lowerCamelCase : int = 1, **lowerCamelCase : Optional[Any], ):
'''simple docstring'''
lowercase__ = self.speech_processor.feature_extractor(
lowerCamelCase, return_tensors='''pt''', sampling_rate=lowerCamelCase ).input_features.to(self.device )
lowercase__ = self.speech_model.generate(lowerCamelCase, max_length=480_000 )
lowercase__ = self.speech_processor.tokenizer.batch_decode(lowerCamelCase, skip_special_tokens=lowerCamelCase, normalize=lowerCamelCase )[
0
]
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = 1
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = len(lowerCamelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase, lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(lowerCamelCase )}.""" )
# get prompt text embeddings
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=self.tokenizer.model_max_length, return_tensors='''pt''', )
lowercase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
lowercase__ = text_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt, lowerCamelCase, -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = [''''''] * batch_size
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !="""
F""" {type(lowerCamelCase )}.""" )
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
lowercase__ = negative_prompt
lowercase__ = text_input_ids.shape[-1]
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=lowerCamelCase, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = uncond_embeddings.shape[1]
lowercase__ = uncond_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt, lowerCamelCase, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='''cpu''', dtype=lowerCamelCase ).to(
self.device )
else:
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase )
# predict the noise residual
lowercase__ = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = 1 / 0.18215 * latents
lowercase__ = self.vae.decode(lowerCamelCase ).sample
lowercase__ = (image / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase )
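# Illustrative usage (commented sketch, not part of the original sample; the
# checkpoint ids and the audio-loading step are assumptions, and upstream this
# class corresponds to the diffusers community speech-to-image pipeline):
#   import librosa
#   audio, sr = librosa.load("speech.wav", sr=16_000)
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", custom_pipeline="speech_to_image_diffusion"
#   )
#   image = pipe(audio, sampling_rate=sr).images[0]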
| 671 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A__ : Dict = logging.get_logger(__name__)
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = b.T
lowercase__ = np.sum(np.square(lowerCamelCase_ ) , axis=1 )
lowercase__ = np.sum(np.square(lowerCamelCase_ ) , axis=0 )
lowercase__ = np.matmul(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = aa[:, None] - 2 * ab + ba[None, :]
return d
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = x.reshape(-1 , 3 )
lowercase__ = squared_euclidean_distance(lowerCamelCase_ , lowerCamelCase_ )
return np.argmin(lowerCamelCase_ , axis=1 )
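# Self-contained illustration of the two helpers above (a commented sketch with
# example values, not part of the original sample). The first helper expands
# ||a - b||^2 as ||a||^2 + ||b||^2 - 2ab; the second maps every pixel to the
# index of its nearest cluster:
#   import numpy as np
#   palette = np.array([[0, 0, 0], [255, 255, 255]], dtype=float)      # 2 clusters
#   pixels = np.array([[[10, 10, 10], [250, 240, 245]]], dtype=float)  # 1x2 image
#   nearest-cluster result for these pixels -> array([0, 1])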
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = ["""pixel_values"""]
def __init__( self : Union[str, Any], lowerCamelCase : Optional[Union[List[List[int]], np.ndarray]] = None, lowerCamelCase : bool = True, lowerCamelCase : Dict[str, int] = None, lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR, lowerCamelCase : bool = True, lowerCamelCase : bool = True, **lowerCamelCase : str, ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
lowercase__ = size if size is not None else {'''height''': 256, '''width''': 256}
lowercase__ = get_size_dict(lowerCamelCase )
lowercase__ = np.array(lowerCamelCase ) if clusters is not None else None
lowercase__ = do_resize
lowercase__ = size
lowercase__ = resample
lowercase__ = do_normalize
lowercase__ = do_color_quantize
def lowercase__ ( self : Optional[Any], lowerCamelCase : np.ndarray, lowerCamelCase : Dict[str, int], lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR, lowerCamelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCamelCase : Union[str, Any], ):
'''simple docstring'''
lowercase__ = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
return resize(
lowerCamelCase, size=(size['''height'''], size['''width''']), resample=lowerCamelCase, data_format=lowerCamelCase, **lowerCamelCase )
def lowercase__ ( self : Any, lowerCamelCase : np.ndarray, lowerCamelCase : Optional[Union[str, ChannelDimension]] = None, ):
'''simple docstring'''
lowercase__ = rescale(image=lowerCamelCase, scale=1 / 127.5, data_format=lowerCamelCase )
lowercase__ = image - 1
return image
def lowercase__ ( self : Union[str, Any], lowerCamelCase : ImageInput, lowerCamelCase : bool = None, lowerCamelCase : Dict[str, int] = None, lowerCamelCase : PILImageResampling = None, lowerCamelCase : bool = None, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[Union[List[List[int]], np.ndarray]] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, lowerCamelCase : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST, **lowerCamelCase : Any, ):
'''simple docstring'''
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(lowerCamelCase )
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
lowercase__ = clusters if clusters is not None else self.clusters
lowercase__ = np.array(lowerCamelCase )
lowercase__ = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_color_quantize and clusters is None:
raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
# All transformations expect numpy arrays.
lowercase__ = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
lowercase__ = [self.resize(image=lowerCamelCase, size=lowerCamelCase, resample=lowerCamelCase ) for image in images]
if do_normalize:
lowercase__ = [self.normalize(image=lowerCamelCase ) for image in images]
if do_color_quantize:
lowercase__ = [to_channel_dimension_format(lowerCamelCase, ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
lowercase__ = np.array(lowerCamelCase )
lowercase__ = color_quantize(lowerCamelCase, lowerCamelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
lowercase__ = images.shape[0]
lowercase__ = images.reshape(lowerCamelCase, -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
lowercase__ = list(lowerCamelCase )
else:
lowercase__ = [to_channel_dimension_format(lowerCamelCase, lowerCamelCase ) for image in images]
lowercase__ = {'''input_ids''': images}
return BatchFeature(data=lowerCamelCase, tensor_type=lowerCamelCase )
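# Illustrative usage (commented sketch, not part of the original sample; upstream
# this class corresponds to ImageGPTImageProcessor, whose color clusters ship
# with the checkpoint):
#   from transformers import ImageGPTImageProcessor
#   processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
#   encoding = processor(images=image, return_tensors="pt")  # yields "input_ids"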
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : str, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = [[] for _ in range(lowerCamelCase )]
lowercase__ = size
def __getitem__( self : Optional[Any], lowerCamelCase : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def lowercase__ ( self : str ):
'''simple docstring'''
return self._size
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(lowerCamelCase, lowerCamelCase ) )
def lowercase__ ( self : Optional[int], lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = deque([start_vertex] )
lowercase__ = [None] * self.size
lowercase__ = 0
while queue:
lowercase__ = queue.popleft()
lowercase__ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowercase__ = current_distance + edge.weight
lowercase__ = distances[edge.destination_vertex]
if (
isinstance(lowerCamelCase, lowerCamelCase )
and new_distance >= dest_vertex_distance
):
continue
lowercase__ = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
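# A self-contained, runnable sketch of the same 0-1 BFS idea (an added
# illustration, not part of the original sample; it reuses the `deque` imported
# above): zero-weight edges go to the front of the deque and unit-weight edges
# to the back, so vertices are popped in non-decreasing distance order.
if __name__ == "__main__":
    adj = {0: [(1, 0), (2, 1)], 1: [(2, 1)], 2: []}  # vertex -> [(dest, weight)]
    dist = {0: 0, 1: None, 2: None}
    dq = deque([0])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:
            if dist[v] is None or dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                if w == 0:
                    dq.appendleft(v)
                else:
                    dq.append(v)
    print(dist)  # {0: 0, 1: 0, 2: 1}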
| 671 | 1 |
from manim import *
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = Rectangle(height=0.5, width=0.5 )
lowercase__ = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
lowercase__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
lowercase__ = VGroup(lowerCamelCase, lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
lowercase__ = Text('''CPU''', font_size=24 )
lowercase__ = Group(lowerCamelCase, lowerCamelCase ).arrange(lowerCamelCase, buff=0.5, aligned_edge=lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase )
lowercase__ = [mem.copy() for i in range(1 )]
lowercase__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
lowercase__ = Text('''GPU''', font_size=24 )
lowercase__ = Group(lowerCamelCase, lowerCamelCase ).arrange(lowerCamelCase, buff=0.5, aligned_edge=lowerCamelCase )
gpu.align_to(lowerCamelCase, lowerCamelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCamelCase )
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
lowercase__ = Text('''Model''', font_size=24 )
lowercase__ = Group(lowerCamelCase, lowerCamelCase ).arrange(lowerCamelCase, buff=0.5, aligned_edge=lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCamelCase, run_time=1 ), Create(lowerCamelCase, run_time=1 ), Create(lowerCamelCase, run_time=1 ), )
lowercase__ = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""", font_size=24, )
lowercase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase__ = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""", font_size=18, )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase, run_time=2.5 ), Write(lowerCamelCase ), Write(lowerCamelCase ) )
self.add(lowerCamelCase )
lowercase__ = []
lowercase__ = []
lowercase__ = []
for i, rect in enumerate(lowerCamelCase ):
lowercase__ = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase, opacity=0.7 )
cpu_target.move_to(lowerCamelCase )
cpu_target.generate_target()
lowercase__ = 0.46 / 4
lowercase__ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=lowerCamelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target, direction=lowerCamelCase, buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target, direction=lowerCamelCase, buff=0.0 )
cpu_targs.append(lowerCamelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase ) )
second_animations.append(MoveToTarget(lowerCamelCase, run_time=1.5 ) )
self.play(*lowerCamelCase )
self.play(*lowerCamelCase )
self.wait()
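# To render the scene above (a hypothetical invocation; the file name is an
# assumption, not part of the original sample):
#   manim -pql checkpoint_animation.py _UpperCAmelCase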
| 671 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
        # the input arrives as a comma-separated string, so split it into a list of tokens
lowercase__ = arr.split(''',''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = [int(self.array[0] )] * len(self.array )
lowercase__ = [int(self.array[0] )] * len(self.array )
for i in range(1, len(self.array ) ):
lowercase__ = max(
int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
lowercase__ = max(sum_value[i], rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
A__ : Dict = input('please input some numbers:')
A__ : Union[str, Any] = SubArray(whole_array)
A__ : int = array.solve_sub_array()
    print(('the result is:', re))
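# A runnable illustration of the same recurrence (an added sketch, not part of
# the original sample): Kadane's algorithm, where each element either extends
# the running sum or starts a new subarray.
if __name__ == "__main__":
    values = [1, -2, 4, -5, 8]
    best_here = best = values[0]
    for v in values[1:]:
        best_here = max(v + best_here, v)
        best = max(best, best_here)
    print(best)  # 8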
| 671 | 1 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any], lowerCamelCase : list ):
'''simple docstring'''
lowercase__ = set_counts
lowercase__ = max(lowerCamelCase )
lowercase__ = len(lowerCamelCase )
lowercase__ = [1] * num_sets
lowercase__ = list(range(lowerCamelCase ) )
def lowercase__ ( self : Optional[int], lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = self.get_parent(lowerCamelCase )
lowercase__ = self.get_parent(lowerCamelCase )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowercase__ = 0
lowercase__ = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowercase__ = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowercase__ = 0
lowercase__ = src_parent
lowercase__ = self.set_counts[src_parent]
lowercase__ = max(self.max_set, lowerCamelCase )
return True
def lowercase__ ( self : Any, lowerCamelCase : int ):
'''simple docstring'''
if self.parents[disj_set] == disj_set:
return disj_set
lowercase__ = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
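# A runnable sketch of the union/find mechanics used above (an added
# illustration, not part of the original sample; it tracks parents only, not
# set sizes or ranks):
if __name__ == "__main__":
    parent = list(range(5))

    def find(x: int) -> int:
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving keeps trees shallow
            x = parent[x]
        return x

    def union(a: int, b: int) -> None:
        ra, rb = find(a), find(b)
        if ra != rb:
            parent[ra] = rb

    union(0, 1)
    union(1, 2)
    print(find(0) == find(2))  # True: 0 and 2 now share a representative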
| 671 |
from itertools import count
def a ( lowerCamelCase_ = 50 ):
'''simple docstring'''
lowercase__ = [1] * min_block_length
for n in count(lowerCamelCase_ ):
fill_count_functions.append(1 )
for block_length in range(lowerCamelCase_ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 100_0000:
break
return n
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
import argparse
import os
import re
A__ : Dict = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
A__ : List[Any] = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
A__ : Union[str, Any] = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def a ( lowerCamelCase_ , lowerCamelCase_ = False ):
'''simple docstring'''
with open(lowerCamelCase_ , '''r''' , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
lowercase__ = content.split('''\n''' )
lowercase__ = []
lowercase__ = 0
while line_idx < len(lowerCamelCase_ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
lowercase__ = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
lowercase__ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
lowercase__ = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
lowercase__ = sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : _re_identifier.search(lowerCamelCase_ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCamelCase_ ) )
elif "\n".join(lowerCamelCase_ ) != content:
return True
def a ( lowerCamelCase_ = False ):
'''simple docstring'''
lowercase__ = [os.path.join(lowerCamelCase_ , lowerCamelCase_ ) for f in os.listdir(lowerCamelCase_ ) if f.endswith('''.py''' )]
lowercase__ = [sort_auto_mapping(lowerCamelCase_ , overwrite=lowerCamelCase_ ) for fname in fnames]
if not overwrite and any(lowerCamelCase_ ):
lowercase__ = [f for f, d in zip(lowerCamelCase_ , lowerCamelCase_ ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {', '.join(lowerCamelCase_ )}. Run `make style` to fix"""
''' this.''' )
if __name__ == "__main__":
A__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
A__ : Dict = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
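# Illustrative invocations (the path is an assumption; run from the repository root):
#   python utils/sort_auto_mappings.py --check_only   # report unsorted mappings
#   python utils/sort_auto_mappings.py                # rewrite them in place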
| 671 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A__ : Tuple = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[int], lowerCamelCase : int=64, lowerCamelCase : Union[str, Any]=48_000, lowerCamelCase : str=480, lowerCamelCase : Tuple=10, lowerCamelCase : List[Any]=1_024, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[Any]=False, lowerCamelCase : float = 0, lowerCamelCase : float = 14_000, lowerCamelCase : int = None, lowerCamelCase : str = "fusion", lowerCamelCase : str = "repeatpad", **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase, sampling_rate=lowerCamelCase, padding_value=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
lowercase__ = top_db
lowercase__ = truncation
lowercase__ = padding
lowercase__ = fft_window_size
lowercase__ = (fft_window_size >> 1) + 1
lowercase__ = hop_length
lowercase__ = max_length_s
lowercase__ = max_length_s * sampling_rate
lowercase__ = sampling_rate
lowercase__ = frequency_min
lowercase__ = frequency_max
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm=lowerCamelCase, mel_scale='''htk''', )
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm='''slaney''', mel_scale='''slaney''', )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowercase__ ( self : Optional[int], lowerCamelCase : np.array, lowerCamelCase : Optional[np.array] = None ):
'''simple docstring'''
lowercase__ = spectrogram(
lowerCamelCase, window_function(self.fft_window_size, '''hann''' ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=lowerCamelCase, log_mel='''dB''', )
return log_mel_spectrogram.T
def lowercase__ ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
# randomly choose index for each part
lowercase__ = np.random.choice(ranges[0] )
lowercase__ = np.random.choice(ranges[1] )
lowercase__ = np.random.choice(ranges[2] )
lowercase__ = mel[idx_front : idx_front + chunk_frames, :]
lowercase__ = mel[idx_middle : idx_middle + chunk_frames, :]
lowercase__ = mel[idx_back : idx_back + chunk_frames, :]
lowercase__ = torch.tensor(mel[None, None, :] )
lowercase__ = torch.nn.functional.interpolate(
lowerCamelCase, size=[chunk_frames, 64], mode='''bilinear''', align_corners=lowerCamelCase )
lowercase__ = mel_shrink[0][0].numpy()
lowercase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
return mel_fusion
def lowercase__ ( self : List[str], lowerCamelCase : np.array, lowerCamelCase : int, lowerCamelCase : Dict, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowercase__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowercase__ = len(lowerCamelCase ) - max_length
lowercase__ = np.random.randint(0, overflow + 1 )
lowercase__ = waveform[idx : idx + max_length]
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowercase__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowercase__ = np.stack([mel, mel, mel, mel], axis=0 )
lowercase__ = False
else:
lowercase__ = self._random_mel_fusion(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
lowercase__ = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, lowerCamelCase ) )
lowercase__ = np.pad(lowerCamelCase, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0 )
if truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
else:
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any], lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], lowerCamelCase : str = None, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, **lowerCamelCase : List[str], ):
'''simple docstring'''
lowercase__ = truncation if truncation is not None else self.truncation
lowercase__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase__ = isinstance(lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase, np.ndarray ):
lowercase__ = np.asarray(lowerCamelCase, dtype=np.floataa )
elif isinstance(lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray(lowerCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
lowercase__ = [
self._get_input_mel(lowerCamelCase, max_length if max_length else self.nb_max_samples, lowerCamelCase, lowerCamelCase )
for waveform in raw_speech
]
lowercase__ = []
lowercase__ = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase )
is_longer.append(lowerCamelCase )
if truncation == "fusion" and sum(lowerCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowercase__ = np.random.randint(0, len(lowerCamelCase ) )
lowercase__ = True
if isinstance(input_mel[0], lowerCamelCase ):
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowercase__ = [[longer] for longer in is_longer]
lowercase__ = {'''input_features''': input_mel, '''is_longer''': is_longer}
lowercase__ = BatchFeature(lowerCamelCase )
if return_tensors is not None:
lowercase__ = input_features.convert_to_tensors(lowerCamelCase )
return input_features
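# Illustrative usage (commented sketch, not part of the original sample; the
# checkpoint id is an assumption, and upstream this class corresponds to
# ClapFeatureExtractor):
#   import numpy as np
#   from transformers import ClapFeatureExtractor
#   fe = ClapFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = fe(np.zeros(48_000), sampling_rate=48_000, return_tensors="pt")
#   # -> inputs["input_features"], inputs["is_longer"]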
| 671 | 1 |
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = len(lowerCamelCase_ )
for i in range(n - 1 ):
for j in range(i + 1 , lowerCamelCase_ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def a ( lowerCamelCase_ ):
'''simple docstring'''
if len(lowerCamelCase_ ) <= 1:
return arr, 0
lowercase__ = len(lowerCamelCase_ ) // 2
lowercase__ = arr[0:mid]
lowercase__ = arr[mid:]
lowercase__ , lowercase__ = count_inversions_recursive(lowerCamelCase_ )
lowercase__ , lowercase__ = count_inversions_recursive(lowerCamelCase_ )
lowercase__ , lowercase__ = _count_cross_inversions(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
lowercase__ = lowercase__ = lowercase__ = 0
while i < len(lowerCamelCase_ ) and j < len(lowerCamelCase_ ):
if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for every k >= i,
            # because P is sorted, so all remaining elements of P
            # form inversions with Q[j].
num_inversion += len(lowerCamelCase_ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(lowerCamelCase_ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def a ( ):
'''simple docstring'''
lowercase__ = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
lowercase__ = count_inversions_bf(lowerCamelCase_ )
lowercase__ , lowercase__ = count_inversions_recursive(lowerCamelCase_ )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , lowerCamelCase_ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
lowercase__ = count_inversions_bf(lowerCamelCase_ )
lowercase__ , lowercase__ = count_inversions_recursive(lowerCamelCase_ )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , lowerCamelCase_ )
# an empty list should also have zero inversions
lowercase__ = []
lowercase__ = count_inversions_bf(lowerCamelCase_ )
lowercase__ , lowercase__ = count_inversions_recursive(lowerCamelCase_ )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , lowerCamelCase_ )
if __name__ == "__main__":
main()
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = None
lowercase__ = None
def a ( ):
'''simple docstring'''
lowercase__ = Node(1 )
lowercase__ = Node(2 )
lowercase__ = Node(3 )
lowercase__ = Node(4 )
lowercase__ = Node(5 )
return tree
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ ):
'''simple docstring'''
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(lowerCamelCase_ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 0
return output
def a ( ): # Main function for testing.
'''simple docstring'''
lowercase__ = make_tree()
print(F"""In-order Traversal: {inorder(lowerCamelCase_ )}""" )
print(F"""Pre-order Traversal: {preorder(lowerCamelCase_ )}""" )
print(F"""Post-order Traversal: {postorder(lowerCamelCase_ )}""" , '''\n''' )
print(F"""Height of Tree: {height(lowerCamelCase_ )}""" , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(lowerCamelCase_ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(lowerCamelCase_ ) + 1 ):
print(F"""Level {level}:""" , get_nodes_from_left_to_right(lowerCamelCase_ , level=lowerCamelCase_ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 671 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
A__ : Optional[int] = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = """upernet"""
def __init__( self : Optional[int], lowerCamelCase : Any=None, lowerCamelCase : int=512, lowerCamelCase : Optional[Any]=0.02, lowerCamelCase : Optional[int]=[1, 2, 3, 6], lowerCamelCase : str=True, lowerCamelCase : Tuple=0.4, lowerCamelCase : Dict=384, lowerCamelCase : Union[str, Any]=256, lowerCamelCase : int=1, lowerCamelCase : List[Any]=False, lowerCamelCase : List[Any]=255, **lowerCamelCase : Any, ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = backbone_config.get('''model_type''' )
lowercase__ = CONFIG_MAPPING[backbone_model_type]
lowercase__ = config_class.from_dict(lowerCamelCase )
lowercase__ = backbone_config
lowercase__ = hidden_size
lowercase__ = initializer_range
lowercase__ = pool_scales
lowercase__ = use_auxiliary_head
lowercase__ = auxiliary_loss_weight
lowercase__ = auxiliary_in_channels
lowercase__ = auxiliary_channels
lowercase__ = auxiliary_num_convs
lowercase__ = auxiliary_concat_input
lowercase__ = loss_ignore_index
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.backbone_config.to_dict()
lowercase__ = self.__class__.model_type
return output
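# Illustrative usage (commented sketch, not part of the original sample; upstream
# this class corresponds to UperNetConfig, and the model class name is an
# assumption):
#   from transformers import UperNetConfig, UperNetForSemanticSegmentation
#   config = UperNetConfig()  # defaults to a ResNet backbone, as handled above
#   model = UperNetForSemanticSegmentation(config)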
| 671 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = DistilBertTokenizer
lowercase__ = DistilBertTokenizerFast
lowercase__ = True
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
lowercase__ = tokenizer.encode('''sequence builders''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''multi-sequence build''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 671 | 1 |
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = len(lowerCamelCase_ )
lowercase__ = len(lowerCamelCase_ )
lowercase__ = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
lowercase__ = True
for i in range(lowerCamelCase_ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
lowercase__ = True
if a[i].islower():
lowercase__ = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
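# A runnable restatement of the DP above with descriptive names (an added
# sketch, not part of the original sample; the assumed semantics: can `s` be
# turned into `t` by uppercasing some of its letters and deleting the rest,
# all of which must be lowercase?).
def can_abbreviate(s: str, t: str) -> bool:
    n, m = len(s), len(t)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and s[i].upper() == t[j]:
                    dp[i + 1][j + 1] = True
                if s[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]

if __name__ == "__main__":
    print(can_abbreviate("daBcd", "ABC"))  # True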
| 671 |
from __future__ import annotations
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
if resistor <= 0:
lowercase__ = F"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(lowerCamelCase_ )
first_sum += 1 / float(lowerCamelCase_ )
index += 1
return 1 / first_sum
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase__ = F"""Resistor at index {index} has a negative value!"""
raise ValueError(lowerCamelCase_ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
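# A runnable illustration of the two formulas above (an added sketch, not part
# of the original sample): parallel combines reciprocals, series simply sums.
if __name__ == "__main__":
    resistors = [2.0, 4.0, 4.0]
    print(1 / sum(1 / r for r in resistors))  # parallel -> 1.0
    print(sum(resistors))                     # series   -> 10.0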
| 671 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : List[Any], lowerCamelCase : NestedDataStructureLike[PathLike], lowerCamelCase : Optional[NamedSplit] = None, lowerCamelCase : Optional[Features] = None, lowerCamelCase : str = None, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional[int] = None, **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
lowerCamelCase, split=lowerCamelCase, features=lowerCamelCase, cache_dir=lowerCamelCase, keep_in_memory=lowerCamelCase, streaming=lowerCamelCase, num_proc=lowerCamelCase, **lowerCamelCase, )
lowercase__ = field
lowercase__ = path_or_paths if isinstance(lowerCamelCase, lowerCamelCase ) else {self.split: path_or_paths}
lowercase__ = Json(
cache_dir=lowerCamelCase, data_files=lowerCamelCase, features=lowerCamelCase, field=lowerCamelCase, **lowerCamelCase, )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
# Build iterable dataset
if self.streaming:
lowercase__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
self.builder.download_and_prepare(
download_config=lowerCamelCase, download_mode=lowerCamelCase, verification_mode=lowerCamelCase, base_path=lowerCamelCase, num_proc=self.num_proc, )
lowercase__ = self.builder.as_dataset(
split=self.split, verification_mode=lowerCamelCase, in_memory=self.keep_in_memory )
return dataset
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any], lowerCamelCase : Dataset, lowerCamelCase : Union[PathLike, BinaryIO], lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[int] = None, **lowerCamelCase : int, ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
lowercase__ = dataset
lowercase__ = path_or_buf
lowercase__ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
lowercase__ = num_proc
lowercase__ = '''utf-8'''
lowercase__ = to_json_kwargs
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.to_json_kwargs.pop('''path_or_buf''', lowerCamelCase )
lowercase__ = self.to_json_kwargs.pop('''orient''', '''records''' )
lowercase__ = self.to_json_kwargs.pop('''lines''', True if orient == '''records''' else False )
lowercase__ = self.to_json_kwargs.pop('''index''', False if orient in ['''split''', '''table'''] else True )
lowercase__ = self.to_json_kwargs.pop('''compression''', lowerCamelCase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf, (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf, '''wb''', compression=lowerCamelCase ) as buffer:
lowercase__ = self._write(file_obj=lowerCamelCase, orient=lowerCamelCase, lines=lowerCamelCase, index=lowerCamelCase, **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
''' was passed. Please provide a local path instead.''' )
lowercase__ = self._write(
file_obj=self.path_or_buf, orient=lowerCamelCase, lines=lowerCamelCase, index=lowerCamelCase, **self.to_json_kwargs )
return written
def lowercase__ ( self : int, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = args
lowercase__ = query_table(
table=self.dataset.data, key=slice(lowerCamelCase, offset + self.batch_size ), indices=self.dataset._indices, )
lowercase__ = batch.to_pandas().to_json(
path_or_buf=lowerCamelCase, orient=lowerCamelCase, lines=lowerCamelCase, index=lowerCamelCase, **lowerCamelCase )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def lowercase__ ( self : Dict, lowerCamelCase : BinaryIO, lowerCamelCase : Optional[int], lowerCamelCase : Optional[int], lowerCamelCase : Optional[Any], **lowerCamelCase : Any, ):
'''simple docstring'''
lowercase__ = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0, len(self.dataset ), self.batch_size ), unit='''ba''', disable=not logging.is_progress_bar_enabled(), desc='''Creating json from Arrow format''', ):
lowercase__ = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowerCamelCase )
else:
lowercase__ , lowercase__ = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, lowerCamelCase, lowerCamelCase )], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit='''ba''', disable=not logging.is_progress_bar_enabled(), desc='''Creating json from Arrow format''', ):
written += file_obj.write(lowerCamelCase )
return written
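# Illustrative usage (commented sketch, not part of the original sample; upstream
# the reader/writer pair corresponds to JsonDatasetReader / JsonDatasetWriter and
# is usually reached through the public API):
#   from datasets import Dataset, load_dataset
#   ds = load_dataset("json", data_files="data.jsonl", split="train")
#   Dataset.from_dict({"a": [1, 2]}).to_json("out.jsonl")  # lines=True by default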
| 671 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
lowercase__ = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('''RGB''' )
lowercase__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
lowercase__ = transform(lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ )
return image
def a ( lowerCamelCase_ ):
'''simple docstring'''
if "visual_encoder" in key:
lowercase__ = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , lowerCamelCase_ )
if "blocks" in key:
lowercase__ = re.sub(r'''blocks''' , '''layers''' , lowerCamelCase_ )
if "attn" in key:
lowercase__ = re.sub(r'''attn''' , '''self_attn''' , lowerCamelCase_ )
if "norm1" in key:
lowercase__ = re.sub(r'''norm1''' , '''layer_norm1''' , lowerCamelCase_ )
if "norm2" in key:
lowercase__ = re.sub(r'''norm2''' , '''layer_norm2''' , lowerCamelCase_ )
if "encoder.norm" in key:
lowercase__ = re.sub(r'''encoder.norm''' , '''post_layernorm''' , lowerCamelCase_ )
if "encoder.patch_embed.proj" in key:
lowercase__ = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , lowerCamelCase_ )
if "encoder.pos_embed" in key:
lowercase__ = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , lowerCamelCase_ )
if "encoder.cls_token" in key:
lowercase__ = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , lowerCamelCase_ )
if "self_attn" in key:
lowercase__ = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , lowerCamelCase_ )
return key
@torch.no_grad()
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
if config_path is not None:
lowercase__ = BlipConfig.from_pretrained(lowerCamelCase_ )
else:
lowercase__ = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
lowercase__ = BlipForConditionalGeneration(lowerCamelCase_ ).eval()
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
lowercase__ = blip_decoder(pretrained=lowerCamelCase_ , image_size=384 , vit='''base''' )
lowercase__ = pt_model.eval()
lowercase__ = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
hf_model.load_state_dict(lowerCamelCase_ )
lowercase__ = 384
lowercase__ = load_demo_image(image_size=lowerCamelCase_ , device='''cpu''' )
lowercase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase__ = tokenizer(['''a picture of'''] ).input_ids
lowercase__ = hf_model.generate(lowerCamelCase_ , lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowercase__ = hf_model.generate(lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCamelCase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase__ = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
lowercase__ = blip_vqa(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
vqa_model.eval()
lowercase__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForQuestionAnswering(lowerCamelCase_ )
hf_vqa_model.load_state_dict(lowerCamelCase_ )
lowercase__ = ['''How many dogs are in this image?''']
lowercase__ = tokenizer(lowerCamelCase_ , return_tensors='''pt''' ).input_ids
lowercase__ = hf_vqa_model.generate(lowerCamelCase_ , lowerCamelCase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
lowercase__ = blip_itm(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
itm_model.eval()
lowercase__ = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForImageTextRetrieval(lowerCamelCase_ )
lowercase__ = ['''A picture of a woman with a dog sitting in a beach''']
lowercase__ = tokenizer(
lowerCamelCase_ , return_tensors='''pt''' , padding='''max_length''' , truncation=lowerCamelCase_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(lowerCamelCase_ )
hf_itm_model.eval()
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
A__ : List[Any] = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
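# Illustrative invocation (a hypothetical sketch; the script filename is an
# assumption, not part of the original sample):
#   python convert_blip_original_pytorch_to_hf.py \
#       --pytorch_dump_folder_path ./blip-converted --config_path config.json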
| 671 | 1 |
import math
def a ( lowerCamelCase_ ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a ( lowerCamelCase_ = 0.1 ):
'''simple docstring'''
lowercase__ = 3
lowercase__ = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(lowerCamelCase_ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Any, lowerCamelCase : Tuple=7, lowerCamelCase : str=3, lowerCamelCase : Tuple=18, lowerCamelCase : int=30, lowerCamelCase : Tuple=400, lowerCamelCase : Any=True, lowerCamelCase : Any=None, lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=None, ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''shortest_edge''': 20}
lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
def lowercase__ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''crop_size''' ) )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Any ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
| 671 | 1 |
def a ( lowerCamelCase_ ):
'''simple docstring'''
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('''multiplicative_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''multiplicative_persistence() does not accept negative values''' )
lowercase__ = 0
lowercase__ = str(lowerCamelCase_ )
while len(lowerCamelCase_ ) != 1:
lowercase__ = [int(lowerCamelCase_ ) for i in num_string]
lowercase__ = 1
for i in range(0 , len(lowerCamelCase_ ) ):
total *= numbers[i]
lowercase__ = str(lowerCamelCase_ )
steps += 1
return steps
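# Illustrative example: 39 -> 3 * 9 = 27 -> 2 * 7 = 14 -> 1 * 4 = 4,
# so the multiplicative persistence of 39 is 3.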
def a ( lowerCamelCase_ ):
'''simple docstring'''
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('''additive_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''additive_persistence() does not accept negative values''' )
lowercase__ = 0
lowercase__ = str(lowerCamelCase_ )
while len(lowerCamelCase_ ) != 1:
lowercase__ = [int(lowerCamelCase_ ) for i in num_string]
lowercase__ = 0
for i in range(0 , len(lowerCamelCase_ ) ):
total += numbers[i]
lowercase__ = str(lowerCamelCase_ )
steps += 1
return steps
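# Illustrative example: 9876 -> 9 + 8 + 7 + 6 = 30 -> 3 + 0 = 3,
# so the additive persistence of 9876 is 2.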
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 |
import argparse
import os
import re
A__ : Optional[int] = 'src/transformers'
# Pattern that looks at the indentation in a line.
A__ : Union[str, Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":` and puts `key` in group 0.
A__ : List[str] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A__ : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
A__ : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A__ : Tuple = re.compile(r'\[([^\]]+)\]')
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _re_indent.search(lowerCamelCase_ )
return "" if search is None else search.groups()[0]
def a ( lowerCamelCase_ , lowerCamelCase_="" , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase_ ):
index += 1
lowercase__ = ['''\n'''.join(lines[:index] )]
else:
lowercase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ = [lines[index]]
index += 1
while index < len(lowerCamelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCamelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCamelCase_ ) )
if index < len(lowerCamelCase_ ) - 1:
lowercase__ = [lines[index + 1]]
index += 1
else:
lowercase__ = []
else:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
lowercase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase_ ) > 0:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase_ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def a ( lowerCamelCase_ ):
'''simple docstring'''
def _inner(lowerCamelCase_ ):
return key(lowerCamelCase_ ).lower().replace('''_''' , '''''' )
return _inner
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
def noop(lowerCamelCase_ ):
return x
if key is None:
lowercase__ = noop
# Constants are all uppercase, they go first.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ )[0].isupper() and not key(lowerCamelCase_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase__ = [obj for obj in objects if not key(lowerCamelCase_ )[0].isupper()]
lowercase__ = ignore_underscore(lowerCamelCase_ )
return sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ )
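# Illustrative ordering: given ["load_tf_weights", "BertModel", "CONFIG_MAPPING"],
# this returns constants first, then classes, then functions --
# ["CONFIG_MAPPING", "BertModel", "load_tf_weights"] -- each group sorted
# case-insensitively and ignoring underscores.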
def a ( lowerCamelCase_ ):
'''simple docstring'''
# This inner function sort imports between [ ].
def _replace(lowerCamelCase_ ):
lowercase__ = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] ) + "]"
lowercase__ = import_statement.split('''\n''' )
if len(lowerCamelCase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase__ = 2 if lines[1].strip() == '''[''' else 1
lowercase__ = [(i, _re_strip_line.search(lowerCamelCase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowercase__ = sort_objects(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )
lowercase__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCamelCase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase__ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
lowercase__ = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] )
return "\n".join(lowerCamelCase_ )
else:
# Finally we have to deal with imports fitting on one line
lowercase__ = _re_bracket_content.sub(_replace , lowerCamelCase_ )
return import_statement
def a ( lowerCamelCase_ , lowerCamelCase_=True ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase__ = split_code_in_indented_blocks(
lowerCamelCase_ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCamelCase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowercase__ = main_blocks[block_idx]
lowercase__ = block.split('''\n''' )
# Get to the start of the imports.
lowercase__ = 0
while line_idx < len(lowerCamelCase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowercase__ = len(lowerCamelCase_ )
else:
line_idx += 1
if line_idx >= len(lowerCamelCase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowercase__ = '''\n'''.join(block_lines[line_idx:-1] )
lowercase__ = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowercase__ = split_code_in_indented_blocks(lowerCamelCase_ , indent_level=lowerCamelCase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase__ = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowercase__ = [(pattern.search(lowerCamelCase_ ).groups()[0] if pattern.search(lowerCamelCase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase__ = [(i, key) for i, key in enumerate(lowerCamelCase_ ) if key is not None]
lowercase__ = [x[0] for x in sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowercase__ = 0
lowercase__ = []
for i in range(len(lowerCamelCase_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowercase__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCamelCase_ )
count += 1
# And we put our main block back together with its first and last line.
lowercase__ = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCamelCase_ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCamelCase_ ) )
def a ( lowerCamelCase_=True ):
'''simple docstring'''
lowercase__ = []
for root, _, files in os.walk(lowerCamelCase_ ):
if "__init__.py" in files:
lowercase__ = sort_imports(os.path.join(lowerCamelCase_ , '''__init__.py''' ) , check_only=lowerCamelCase_ )
if result:
lowercase__ = [os.path.join(lowerCamelCase_ , '''__init__.py''' )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(F"""Would overwrite {len(lowerCamelCase_ )} files, run `make style`.""" )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
A__ : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 671 | 1 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
A__ : str = 'hf-internal-testing/tiny-random-bert'
A__ : Optional[int] = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
A__ : List[str] = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = cached_file(lowerCamelCase, lowerCamelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(lowerCamelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(lowerCamelCase, lowerCamelCase ) ) )
with open(os.path.join(lowerCamelCase, '''refs''', '''main''' ) ) as f:
lowercase__ = f.read()
self.assertEqual(lowerCamelCase, os.path.join(lowerCamelCase, '''snapshots''', lowerCamelCase, lowerCamelCase ) )
self.assertTrue(os.path.isfile(lowerCamelCase ) )
# File is cached at the same place the second time.
lowercase__ = cached_file(lowerCamelCase, lowerCamelCase )
self.assertEqual(lowerCamelCase, lowerCamelCase )
# Using a specific revision to test the full commit hash.
lowercase__ = cached_file(lowerCamelCase, lowerCamelCase, revision='''9b8c223''' )
self.assertEqual(lowerCamelCase, os.path.join(lowerCamelCase, '''snapshots''', lowerCamelCase, lowerCamelCase ) )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(lowerCamelCase, '''is not a valid model identifier''' ):
lowercase__ = cached_file('''tiny-random-bert''', lowerCamelCase )
with self.assertRaisesRegex(lowerCamelCase, '''is not a valid git identifier''' ):
lowercase__ = cached_file(lowerCamelCase, lowerCamelCase, revision='''aaaa''' )
with self.assertRaisesRegex(lowerCamelCase, '''does not appear to have a file named''' ):
lowercase__ = cached_file(lowerCamelCase, '''conf''' )
def lowercase__ ( self : Any ):
'''simple docstring'''
with self.assertRaisesRegex(lowerCamelCase, '''does not appear to have a file named''' ):
lowercase__ = cached_file(lowerCamelCase, '''conf''' )
with open(os.path.join(lowerCamelCase, '''refs''', '''main''' ) ) as f:
lowercase__ = f.read()
self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase, '''.no_exist''', lowerCamelCase, '''conf''' ) ) )
lowercase__ = cached_file(lowerCamelCase, '''conf''', _raise_exceptions_for_missing_entries=lowerCamelCase )
self.assertIsNone(lowerCamelCase )
lowercase__ = cached_file(lowerCamelCase, '''conf''', local_files_only=lowerCamelCase, _raise_exceptions_for_missing_entries=lowerCamelCase )
self.assertIsNone(lowerCamelCase )
lowercase__ = mock.Mock()
lowercase__ = 500
lowercase__ = {}
lowercase__ = HTTPError
lowercase__ = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''', return_value=lowerCamelCase ) as mock_head:
lowercase__ = cached_file(lowerCamelCase, '''conf''', _raise_exceptions_for_connection_errors=lowerCamelCase )
self.assertIsNone(lowerCamelCase )
            # This checks that we did call the fake head request
mock_head.assert_called()
def lowercase__ ( self : str ):
'''simple docstring'''
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''', lowerCamelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''', lowerCamelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''', lowerCamelCase ) )
def lowercase__ ( self : str ):
'''simple docstring'''
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''', '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(lowerCamelCase, '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''', lowerCamelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(lowerCamelCase, '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''', lowerCamelCase, revision='''ahaha''' )
lowercase__ = get_file_from_repo('''bert-base-cased''', lowerCamelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowercase__ = json.loads(open(lowerCamelCase, '''r''' ).read() )
self.assertEqual(config['''hidden_size'''], 768 )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ = Path(lowerCamelCase ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(lowerCamelCase, '''a.txt''' ), str(lowerCamelCase ) )
self.assertIsNone(get_file_from_repo(lowerCamelCase, '''b.txt''' ) )
| 671 |
from math import sqrt
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
lowercase__ = True
# 0 and 1 are none primes.
if number <= 1:
lowercase__ = False
for divisor in range(2 , int(round(sqrt(lowerCamelCase_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowercase__ = False
break
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'status' must been from type bool"
return status
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowercase__ = list(range(2 , n + 1 ) )
    lowercase__ = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(lowerCamelCase_ ) ):
for j in range(i + 1 , len(lowerCamelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase__ = 0
# filters actual prime numbers.
lowercase__ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
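# Illustrative example: for n = 10 this sieve returns the primes [2, 3, 5, 7].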
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
lowercase__ = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCamelCase_ ):
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and number >= 0, "'number' must been an int and >= 0"
    lowercase__ = [] # this list will be returned by the function.
# potential prime number factors.
lowercase__ = 2
lowercase__ = number
if number == 0 or number == 1:
ans.append(lowerCamelCase_ )
    # if 'number' is not prime then build the prime factorization of 'number'
elif not is_prime(lowerCamelCase_ ):
while quotient != 1:
if is_prime(lowerCamelCase_ ) and (quotient % factor == 0):
ans.append(lowerCamelCase_ )
                quotient //= factor  # integer division keeps the quotient an exact int on large inputs
else:
factor += 1
else:
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
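# Illustrative example: for number = 12 the returned factorization is [2, 2, 3].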
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = max(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = min(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowerCamelCase_ ), "compare bust been from type bool"
return number % 2 == 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowerCamelCase_ ), "compare bust been from type bool"
return number % 2 != 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (number > 2) and is_even(lowerCamelCase_ )
), "'number' must been an int, even and > 2"
    lowercase__ = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
lowercase__ = get_prime_numbers(lowerCamelCase_ )
lowercase__ = len(lowerCamelCase_ )
# run variable for while-loops.
lowercase__ = 0
lowercase__ = None
    # exit variable, used to break out of the loops
lowercase__ = True
while i < len_pn and loop:
lowercase__ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase__ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (len(lowerCamelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
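# Illustrative example: for the even number 28 this returns the Goldbach pair [5, 23].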
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 0
while numbera != 0:
lowercase__ = numbera % numbera
lowercase__ = numbera
lowercase__ = rest
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
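# This is the classic Euclidean algorithm: repeatedly replace the pair by
# (smaller, remainder) until the remainder is 0; e.g. the gcd of 12 and 18 is 6.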
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = prime_factorization(lowerCamelCase_ )
elif numbera == 1 or numbera == 1:
lowercase__ = []
lowercase__ = []
lowercase__ = max(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = 0
lowercase__ = 0
lowercase__ = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(max(lowerCamelCase_ , lowerCamelCase_ ) ):
ans *= n
else:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
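# Illustrative example: the least common multiple of 8 and 10 is 40,
# combining 2**3 from 8 with 5 from 10.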
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'number' must been a positive int"
lowercase__ = 0
lowercase__ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
        # if ans is not prime, advance
        # to the next prime number.
while not is_prime(lowerCamelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and is_prime(
lowerCamelCase_ ), "'ans' must been a prime number and from type int"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
is_prime(lowerCamelCase_ ) and is_prime(lowerCamelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase__ = p_number_a + 1 # jump to the next number
    lowercase__ = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCamelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
    # 'ans' does not contain 'pNumber1' or 'pNumber2'!
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 1), "'n' must been int and >= 1"
lowercase__ = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase_ )
# precondition
assert ans[0] == 1 and ans[len(lowerCamelCase_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase__ = get_divisors(lowerCamelCase_ )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
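# Illustrative examples: 6 (= 1 + 2 + 3) and 28 (= 1 + 2 + 4 + 7 + 14) are perfect numbers.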
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
    # compute the greatest common divisor of numerator and denominator.
lowercase__ = gcd(abs(lowerCamelCase_ ) , abs(lowerCamelCase_ ) )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
    lowercase__ = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
lowercase__ = 0
lowercase__ = 1
    lowercase__ = 1 # this will be returned
for _ in range(n - 1 ):
lowercase__ = ans
ans += fiba
lowercase__ = tmp
return ans
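# Illustrative values for this Fibonacci helper: n = 1 -> 1, n = 2 -> 1, n = 5 -> 5, n = 10 -> 55.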
| 671 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A__ : int = 16
A__ : int = 32
def a ( lowerCamelCase_ , lowerCamelCase_ = 16 ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(lowerCamelCase_ ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ = datasets.map(
lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowerCamelCase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want sequence lengths that are multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ = 16
elif accelerator.mixed_precision != "no":
lowercase__ = 8
else:
lowercase__ = None
return tokenizer.pad(
lowerCamelCase_ , padding='''longest''' , max_length=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ )
lowercase__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A__ : Dict = mocked_dataloaders # noqa: F811
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , lowerCamelCase_ ) == "1":
lowercase__ = 2
# New Code #
lowercase__ = int(args.gradient_accumulation_steps )
# Initialize accelerator
lowercase__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowerCamelCase_ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config['''lr''']
lowercase__ = int(config['''num_epochs'''] )
lowercase__ = int(config['''seed'''] )
lowercase__ = int(config['''batch_size'''] )
lowercase__ = evaluate.load('''glue''' , '''mrpc''' )
set_seed(lowerCamelCase_ )
lowercase__ , lowercase__ = get_dataloaders(lowerCamelCase_ , lowerCamelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCamelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ = AdamW(params=model.parameters() , lr=lowerCamelCase_ )
# Instantiate scheduler
lowercase__ = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Now we train the model
for epoch in range(lowerCamelCase_ ):
model.train()
for step, batch in enumerate(lowerCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
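            # Behavior sketch: inside `accumulate`, Accelerate scales the loss by
            # 1 / gradient_accumulation_steps and only syncs gradients (and lets the
            # optimizer actually step) every `gradient_accumulation_steps` iterations.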
with accelerator.accumulate(lowerCamelCase_ ):
lowercase__ = model(**lowerCamelCase_ )
lowercase__ = output.loss
accelerator.backward(lowerCamelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**lowerCamelCase_ )
lowercase__ = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=lowerCamelCase_ , references=lowerCamelCase_ , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , lowerCamelCase_ )
def a ( ):
'''simple docstring'''
lowercase__ = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=lowerCamelCase_ , default=lowerCamelCase_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
        '''--gradient_accumulation_steps''' , type=lowerCamelCase_ , default=1 , help='''The number of minibatches to be run before gradients are accumulated.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase__ = parser.parse_args()
lowercase__ = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
main()
| 671 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = args.log_outputs
lowercase__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
lowercase__ = load_metric('''wer''' )
lowercase__ = load_metric('''cer''' )
# compute metrics
lowercase__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
lowercase__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
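    # Reference: WER = (substitutions + deletions + insertions) / number of reference words;
    # CER is the same edit-distance ratio computed over characters instead of words.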
# print & log results
lowercase__ = F"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCamelCase_ )
with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowerCamelCase_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowercase__ = F"""log_{dataset_id}_predictions.txt"""
lowercase__ = F"""log_{dataset_id}_targets.txt"""
with open(lowerCamelCase_ , '''w''' ) as p, open(lowerCamelCase_ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCamelCase_ , lowerCamelCase_ ):
p.write(F"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(F"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCamelCase_ , with_indices=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowercase__ = re.sub(lowerCamelCase_ , '''''' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing newline characters etc...
# note that order is important here!
lowercase__ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
lowercase__ = ''' '''.join(text.split(lowerCamelCase_ ) )
return text
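# Illustrative example: normalize_text("Hello, World!") -> "hello world"
# (punctuation from the ignore list is stripped and whitespace is collapsed).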
def a ( lowerCamelCase_ ):
'''simple docstring'''
# load dataset
lowercase__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCamelCase_ )
    # for testing: only process the first few examples as a test
# dataset = dataset.select(range(10))
# load processor
lowercase__ = AutoFeatureExtractor.from_pretrained(args.model_id )
lowercase__ = feature_extractor.sampling_rate
# resample audio
lowercase__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowerCamelCase_ ) )
# load eval pipeline
if args.device is None:
lowercase__ = 0 if torch.cuda.is_available() else -1
lowercase__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCamelCase_ ):
lowercase__ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowercase__ = prediction['''text''']
lowercase__ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
lowercase__ = dataset.map(lowerCamelCase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
A__ : Union[str, Any] = parser.parse_args()
main(args)
| 671 | 1 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = AudioLDMPipeline
lowercase__ = TEXT_TO_AUDIO_PARAMS
lowercase__ = TEXT_TO_AUDIO_BATCH_PARAMS
lowercase__ = frozenset(
[
"""num_inference_steps""",
"""num_waveforms_per_prompt""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=(32, 64), class_embed_type='''simple_projection''', projection_class_embeddings_input_dim=32, class_embeddings_concat=lowerCamelCase, )
lowercase__ = DDIMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=lowerCamelCase, set_alpha_to_one=lowerCamelCase, )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
torch.manual_seed(0 )
lowercase__ = ClapTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, projection_dim=32, )
lowercase__ = ClapTextModelWithProjection(lowerCamelCase )
lowercase__ = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''', model_max_length=77 )
lowercase__ = SpeechTaHifiGanConfig(
model_in_dim=8, sampling_rate=16_000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=lowerCamelCase, )
lowercase__ = SpeechTaHifiGan(lowerCamelCase )
lowercase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''vocoder''': vocoder,
}
return components
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : Optional[Any]=0 ):
'''simple docstring'''
if str(lowerCamelCase ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(lowerCamelCase )
else:
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowercase__ = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
}
return inputs
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = audioldm_pipe(**lowerCamelCase )
lowercase__ = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase ) == 256
lowercase__ = audio[:10]
lowercase__ = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = 3 * [inputs['''prompt''']]
# forward
lowercase__ = audioldm_pipe(**lowerCamelCase )
lowercase__ = output.audios[0]
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = 3 * [inputs.pop('''prompt''' )]
lowercase__ = audioldm_pipe.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=audioldm_pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = text_inputs['''input_ids'''].to(lowerCamelCase )
lowercase__ = audioldm_pipe.text_encoder(
lowerCamelCase, )
lowercase__ = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowercase__ = F.normalize(lowerCamelCase, dim=-1 )
lowercase__ = prompt_embeds
# forward
lowercase__ = audioldm_pipe(**lowerCamelCase )
lowercase__ = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = 3 * ['''this is a negative prompt''']
lowercase__ = negative_prompt
lowercase__ = 3 * [inputs['''prompt''']]
# forward
lowercase__ = audioldm_pipe(**lowerCamelCase )
lowercase__ = output.audios[0]
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = 3 * [inputs.pop('''prompt''' )]
lowercase__ = []
for p in [prompt, negative_prompt]:
lowercase__ = audioldm_pipe.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=audioldm_pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = text_inputs['''input_ids'''].to(lowerCamelCase )
lowercase__ = audioldm_pipe.text_encoder(
lowerCamelCase, )
lowercase__ = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowercase__ = F.normalize(lowerCamelCase, dim=-1 )
embeds.append(lowerCamelCase )
lowercase__ , lowercase__ = embeds
# forward
lowercase__ = audioldm_pipe(**lowerCamelCase )
lowercase__ = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase )
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = '''egg cracking'''
lowercase__ = audioldm_pipe(**lowerCamelCase, negative_prompt=lowerCamelCase )
lowercase__ = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase ) == 256
lowercase__ = audio[:10]
lowercase__ = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase )
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = '''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
lowercase__ = audioldm_pipe(lowerCamelCase, num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
lowercase__ = 2
lowercase__ = audioldm_pipe([prompt] * batch_size, num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
lowercase__ = 2
lowercase__ = audioldm_pipe(lowerCamelCase, num_inference_steps=2, num_waveforms_per_prompt=lowerCamelCase ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
lowercase__ = 2
lowercase__ = audioldm_pipe(
[prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=lowerCamelCase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = audioldm_pipe.vocoder.config.sampling_rate
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = audioldm_pipe(audio_length_in_s=0.016, **lowerCamelCase )
lowercase__ = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase ) / vocoder_sampling_rate == 0.016
lowercase__ = audioldm_pipe(audio_length_in_s=0.032, **lowerCamelCase )
lowercase__ = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase ) / vocoder_sampling_rate == 0.032
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = ['''hey''']
lowercase__ = audioldm_pipe(lowerCamelCase, num_inference_steps=1 )
lowercase__ = output.audios.shape
assert audio_shape == (1, 256)
lowercase__ = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
lowercase__ = SpeechTaHifiGan(lowerCamelCase ).to(lowerCamelCase )
lowercase__ = audioldm_pipe(lowerCamelCase, num_inference_steps=1 )
lowercase__ = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def lowercase__ ( self : int ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase )
def lowercase__ ( self : Dict ):
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase )
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[Any]="cpu", lowerCamelCase : List[Any]=torch.floataa, lowerCamelCase : List[str]=0 ):
'''simple docstring'''
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowercase__ = np.random.RandomState(lowerCamelCase ).standard_normal((1, 8, 128, 16) )
lowercase__ = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase, dtype=lowerCamelCase )
lowercase__ = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 2.5,
}
return inputs
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_inputs(lowerCamelCase )
lowercase__ = 25
lowercase__ = audioldm_pipe(**lowerCamelCase ).audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase ) == 81_920
lowercase__ = audio[77_230:77_240]
lowercase__ = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
lowercase__ = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
lowercase__ = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_inputs(lowerCamelCase )
lowercase__ = audioldm_pipe(**lowerCamelCase ).audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase ) == 81_920
lowercase__ = audio[27_780:27_790]
lowercase__ = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
lowercase__ = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 671 |
from functools import reduce
A__ : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def a ( lowerCamelCase_ = N ):
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCamelCase_ , lowerCamelCase_ : str(int(lowerCamelCase_ ) * int(lowerCamelCase_ ) ) , n[i : i + 13] ) )
for i in range(len(lowerCamelCase_ ) - 12 ) )
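# Equivalent, more explicit form of the reduce above (illustrative only):
#   max(math.prod(int(c) for c in N[i : i + 13]) for i in range(len(N) - 12))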
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def a ( ):
'''simple docstring'''
lowercase__ = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=lowerCamelCase_ , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=lowerCamelCase_ , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=lowerCamelCase_ )
return parser.parse_args()
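# Example invocation (illustrative):
#   python xla_spawn.py --num_cores 8 your_training_script.py --your_script_args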
def a ( ):
'''simple docstring'''
lowercase__ = parse_args()
# Import training_script as a module.
lowercase__ = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowercase__ = script_fpath.stem
lowercase__ = importlib.import_module(lowerCamelCase_ )
# Patch sys.argv
lowercase__ = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 671 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase ( A__ ,A__ ):
"""simple docstring"""
lowercase__ = 1
@register_to_config
def __init__( self : Union[str, Any], lowerCamelCase : int = 2_000, lowerCamelCase : float = 0.15, lowerCamelCase : float = 0.01, lowerCamelCase : float = 1348.0, lowerCamelCase : float = 1E-5, lowerCamelCase : int = 1, ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ = sigma_max
# setable values
lowercase__ = None
self.set_sigmas(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[int] = None ):
'''simple docstring'''
return sample
def lowercase__ ( self : Dict, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : Union[str, torch.device] = None ):
'''simple docstring'''
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowercase__ = torch.linspace(1, lowerCamelCase, lowerCamelCase, device=lowerCamelCase )
def lowercase__ ( self : str, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : float = None, lowerCamelCase : float = None ):
'''simple docstring'''
lowercase__ = sigma_min if sigma_min is not None else self.config.sigma_min
lowercase__ = sigma_max if sigma_max is not None else self.config.sigma_max
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCamelCase, lowerCamelCase )
lowercase__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowercase__ = torch.exp(torch.linspace(math.log(lowerCamelCase ), math.log(lowerCamelCase ), lowerCamelCase ) )
lowercase__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : str ):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
def lowercase__ ( self : Tuple, lowerCamelCase : torch.FloatTensor, lowerCamelCase : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowercase__ = timestep * torch.ones(
sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowercase__ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowercase__ = timesteps.to(self.discrete_sigmas.device )
lowercase__ = self.discrete_sigmas[timesteps].to(sample.device )
lowercase__ = self.get_adjacent_sigma(lowerCamelCase, lowerCamelCase ).to(sample.device )
lowercase__ = torch.zeros_like(lowerCamelCase )
lowercase__ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowercase__ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowercase__ = diffusion.unsqueeze(-1 )
lowercase__ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
lowercase__ = randn_tensor(
sample.shape, layout=sample.layout, generator=lowerCamelCase, device=sample.device, dtype=sample.dtype )
lowercase__ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowercase__ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCamelCase, prev_sample_mean=lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowercase__ = randn_tensor(sample.shape, layout=sample.layout, generator=lowerCamelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowercase__ = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowercase__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowercase__ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowercase__ = step_size.unsqueeze(-1 )
lowercase__ = sample + step_size * model_output
lowercase__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, ):
'''simple docstring'''
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowercase__ = timesteps.to(original_samples.device )
lowercase__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowercase__ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCamelCase ) * sigmas[:, None, None, None]
)
lowercase__ = noise + original_samples
return noisy_samples
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
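# Hedged sketch of the geometric sigma schedule built by `set_sigmas` above,
# assuming sigma_min=0.01, sigma_max=1348.0 and 5 steps (values illustrative):
#   torch.exp(torch.linspace(math.log(0.01), math.log(1348.0), 5))
#   -> approximately [0.01, 0.19, 3.67, 70.35, 1348.0]
# i.e. each step scales sigma by the constant ratio (sigma_max / sigma_min) ** (1 / 4).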
| 671 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 384
lowercase__ = 7
if "tiny" in model_name:
lowercase__ = 96
lowercase__ = (2, 2, 6, 2)
lowercase__ = (3, 6, 12, 24)
elif "small" in model_name:
lowercase__ = 96
lowercase__ = (2, 2, 18, 2)
lowercase__ = (3, 6, 12, 24)
elif "base" in model_name:
lowercase__ = 128
lowercase__ = (2, 2, 18, 2)
lowercase__ = (4, 8, 16, 32)
lowercase__ = 12
lowercase__ = 512
elif "large" in model_name:
lowercase__ = 192
lowercase__ = (2, 2, 18, 2)
lowercase__ = (6, 12, 24, 48)
lowercase__ = 12
lowercase__ = 768
# set label information
lowercase__ = 150
lowercase__ = '''huggingface/label-files'''
lowercase__ = '''ade20k-id2label.json'''
lowercase__ = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='''dataset''' ) , '''r''' ) )
    lowercase__ = {int(k ): v for k, v in idalabel.items()}
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = SwinConfig(
embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
lowercase__ = UperNetConfig(
backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , )
return config
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dct.pop(lowerCamelCase_ )
lowercase__ = val
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowercase__ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowercase__ = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" )
lowercase__ = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[:dim, :]
lowercase__ = in_proj_bias[: dim]
lowercase__ = in_proj_weight[
dim : dim * 2, :
]
lowercase__ = in_proj_bias[
dim : dim * 2
]
lowercase__ = in_proj_weight[
-dim :, :
]
lowercase__ = in_proj_bias[-dim :]
# fmt: on
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ , lowercase__ = x.shape
lowercase__ = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 )
lowercase__ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ , lowercase__ = x.shape
lowercase__ = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 )
lowercase__ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = x.shape[0]
lowercase__ = x.reshape(4 , in_channel // 4 )
lowercase__ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = x.shape[0]
lowercase__ = x.reshape(in_channel // 4 , 4 )
lowercase__ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = {
'''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
'''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
'''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
'''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
}
lowercase__ = model_name_to_url[model_name]
lowercase__ = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='''cpu''' , file_name=lowerCamelCase_ )[
'''state_dict'''
]
for name, param in state_dict.items():
print(lowerCamelCase_ , param.shape )
lowercase__ = get_upernet_config(lowerCamelCase_ )
lowercase__ = UperNetForSemanticSegmentation(lowerCamelCase_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
lowercase__ = state_dict.pop(lowerCamelCase_ )
if "bn" in key:
lowercase__ = key.replace('''bn''' , '''batch_norm''' )
lowercase__ = val
# rename keys
lowercase__ = create_rename_keys(lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
read_in_q_k_v(lowerCamelCase_ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
lowercase__ = reverse_correct_unfold_reduction_order(lowerCamelCase_ )
if "norm" in key:
lowercase__ = reverse_correct_unfold_norm_order(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# verify on image
lowercase__ = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
lowercase__ = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('''RGB''' )
lowercase__ = SegformerImageProcessor()
lowercase__ = processor(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
with torch.no_grad():
lowercase__ = model(lowerCamelCase_ )
lowercase__ = outputs.logits
print(logits.shape )
print('''First values of logits:''' , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
lowercase__ = torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] )
elif model_name == "upernet-swin-small":
lowercase__ = torch.tensor(
[[-7.19_21, -7.19_21, -6.95_32], [-7.19_21, -7.19_21, -6.95_32], [-7.09_08, -7.09_08, -6.85_34]] )
elif model_name == "upernet-swin-base":
lowercase__ = torch.tensor(
[[-6.58_51, -6.58_51, -6.43_30], [-6.58_51, -6.58_51, -6.43_30], [-6.47_63, -6.47_63, -6.32_54]] )
elif model_name == "upernet-swin-large":
lowercase__ = torch.tensor(
[[-7.52_97, -7.52_97, -7.38_02], [-7.52_97, -7.52_97, -7.38_02], [-7.40_44, -7.40_44, -7.25_86]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase_ )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
print(F"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(F"""openmmlab/{model_name}""" )
processor.push_to_hub(F"""openmmlab/{model_name}""" )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[F"upernet-swin-{size}" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
A__ : List[str] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
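    # Hedged usage sketch (the script file name and output folder are illustrative):
    #   python convert_upernet_checkpoint.py --model_name upernet-swin-tiny \
    #       --pytorch_dump_folder_path ./upernet-swin-tiny --push_to_hub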
| 671 |
from collections import defaultdict
from math import gcd
def solution ( limit = 150_0000 ):
    '''simple docstring'''
    frequencies = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
A__ : List[str] = logging.get_logger(__name__)
A__ : List[Any] = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : int, lowerCamelCase : Dict=None, **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''' )
lowercase__ = model
lowercase__ = kwargs.get('''model_save_dir''', lowerCamelCase )
lowercase__ = kwargs.get('''latest_model_name''', lowerCamelCase )
def __call__( self : str, **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
        lowercase__ = {k: np.array(v ) for k, v in kwargs.items()}
return self.model.run(lowerCamelCase, lowerCamelCase )
@staticmethod
def lowercase__ ( lowerCamelCase : Union[str, Path], lowerCamelCase : Dict=None, lowerCamelCase : List[Any]=None ):
'''simple docstring'''
if provider is None:
logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''' )
lowercase__ = '''CPUExecutionProvider'''
return ort.InferenceSession(lowerCamelCase, providers=[provider], sess_options=lowerCamelCase )
def lowercase__ ( self : List[Any], lowerCamelCase : Union[str, Path], lowerCamelCase : Optional[str] = None, **lowerCamelCase : Dict ):
'''simple docstring'''
lowercase__ = file_name if file_name is not None else ONNX_WEIGHTS_NAME
lowercase__ = self.model_save_dir.joinpath(self.latest_model_name )
lowercase__ = Path(lowerCamelCase ).joinpath(lowerCamelCase )
try:
shutil.copyfile(lowerCamelCase, lowerCamelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
lowercase__ = self.model_save_dir.joinpath(lowerCamelCase )
if src_path.exists():
lowercase__ = Path(lowerCamelCase ).joinpath(lowerCamelCase )
try:
shutil.copyfile(lowerCamelCase, lowerCamelCase )
except shutil.SameFileError:
pass
def lowercase__ ( self : Tuple, lowerCamelCase : Union[str, os.PathLike], **lowerCamelCase : str, ):
'''simple docstring'''
if os.path.isfile(lowerCamelCase ):
logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(lowerCamelCase, exist_ok=lowerCamelCase )
# saving model weights/files
self._save_pretrained(lowerCamelCase, **lowerCamelCase )
@classmethod
def lowercase__ ( cls : Optional[int], lowerCamelCase : Union[str, Path], lowerCamelCase : Optional[Union[bool, str, None]] = None, lowerCamelCase : Optional[Union[str, None]] = None, lowerCamelCase : bool = False, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional["ort.SessionOptions"] = None, **lowerCamelCase : List[Any], ):
'''simple docstring'''
lowercase__ = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(lowerCamelCase ):
lowercase__ = OnnxRuntimeModel.load_model(
os.path.join(lowerCamelCase, lowerCamelCase ), provider=lowerCamelCase, sess_options=lowerCamelCase )
lowercase__ = Path(lowerCamelCase )
# load model from hub
else:
# download model
lowercase__ = hf_hub_download(
repo_id=lowerCamelCase, filename=lowerCamelCase, use_auth_token=lowerCamelCase, revision=lowerCamelCase, cache_dir=lowerCamelCase, force_download=lowerCamelCase, )
lowercase__ = Path(lowerCamelCase ).parent
lowercase__ = Path(lowerCamelCase ).name
lowercase__ = OnnxRuntimeModel.load_model(lowerCamelCase, provider=lowerCamelCase, sess_options=lowerCamelCase )
return cls(model=lowerCamelCase, **lowerCamelCase )
@classmethod
def lowercase__ ( cls : List[Any], lowerCamelCase : Union[str, Path], lowerCamelCase : bool = True, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional[str] = None, **lowerCamelCase : Dict, ):
'''simple docstring'''
lowercase__ = None
if len(str(lowerCamelCase ).split('''@''' ) ) == 2:
lowercase__ , lowercase__ = model_id.split('''@''' )
return cls._from_pretrained(
model_id=lowerCamelCase, revision=lowerCamelCase, cache_dir=lowerCamelCase, force_download=lowerCamelCase, use_auth_token=lowerCamelCase, **lowerCamelCase, )
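# Hedged usage sketch (the repo id and the ONNX input name are illustrative and
# depend on the exported graph):
#   model = OnnxRuntimeModel.from_pretrained('''some-org/some-onnx-model''')
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))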
| 671 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
A__ : Dict = logging.get_logger(__name__)
A__ : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A__ : Optional[int] = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
A__ : List[str] = {
'bert-base-uncased': 5_12,
'bert-large-uncased': 5_12,
'bert-base-cased': 5_12,
'bert-large-cased': 5_12,
'bert-base-multilingual-uncased': 5_12,
'bert-base-multilingual-cased': 5_12,
'bert-base-chinese': 5_12,
'bert-base-german-cased': 5_12,
'bert-large-uncased-whole-word-masking': 5_12,
'bert-large-cased-whole-word-masking': 5_12,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_12,
'bert-base-cased-finetuned-mrpc': 5_12,
'bert-base-german-dbmdz-cased': 5_12,
'bert-base-german-dbmdz-uncased': 5_12,
'TurkuNLP/bert-base-finnish-cased-v1': 5_12,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_12,
'wietsedv/bert-base-dutch-cased': 5_12,
}
A__ : Optional[int] = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = BertTokenizer
def __init__( self : Any, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Any=None, lowerCamelCase : Tuple=True, lowerCamelCase : Dict="[UNK]", lowerCamelCase : Any="[SEP]", lowerCamelCase : List[Any]="[PAD]", lowerCamelCase : Optional[Any]="[CLS]", lowerCamelCase : Dict="[MASK]", lowerCamelCase : List[Any]=True, lowerCamelCase : Tuple=None, **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, tokenize_chinese_chars=lowerCamelCase, strip_accents=lowerCamelCase, **lowerCamelCase, )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''', lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''', lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''', lowerCamelCase ) != tokenize_chinese_chars
):
lowercase__ = getattr(lowerCamelCase, normalizer_state.pop('''type''' ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**lowerCamelCase )
lowercase__ = do_lower_case
def lowercase__ ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : Dict=None ):
'''simple docstring'''
lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : List[Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
lowercase__ = self._tokenizer.model.save(lowerCamelCase, name=lowerCamelCase )
return tuple(lowerCamelCase )
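# Hedged usage note: loading '''bert-base-uncased''' through `from_pretrained`
# picks up do_lower_case=True from PRETRAINED_INIT_CONFIGURATION above, so the
# backend normalizer lowercases input before tokenizing.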
| 671 | 1 |
import operator
def strand_sort ( arr , reverse = False , solution = None ):
    '''simple docstring'''
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0 )]
    for i, item in enumerate(arr ):
        if _operator(item , sublist[-1] ):
            sublist.append(item )
            arr.pop(i )
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist )
    else:
        while sublist:
            item = sublist.pop(0 )
            for i, xx in enumerate(solution ):
                if not _operator(item , xx ):
                    solution.insert(i , item )
                    break
            else:
                solution.append(item )
    strand_sort(arr , reverse , solution )
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
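    # Additional illustrative check: duplicate values are preserved by the merge step
    assert strand_sort([1, 1, 2]) == [1, 1, 2]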
| 671 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A__ : Any = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
A__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
A__ : Dict = logging.get_logger(__name__)
A__ : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A__ : Optional[int] = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
A__ : List[str] = {
'bert-base-uncased': 5_12,
'bert-large-uncased': 5_12,
'bert-base-cased': 5_12,
'bert-large-cased': 5_12,
'bert-base-multilingual-uncased': 5_12,
'bert-base-multilingual-cased': 5_12,
'bert-base-chinese': 5_12,
'bert-base-german-cased': 5_12,
'bert-large-uncased-whole-word-masking': 5_12,
'bert-large-cased-whole-word-masking': 5_12,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_12,
'bert-base-cased-finetuned-mrpc': 5_12,
'bert-base-german-dbmdz-cased': 5_12,
'bert-base-german-dbmdz-uncased': 5_12,
'TurkuNLP/bert-base-finnish-cased-v1': 5_12,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_12,
'wietsedv/bert-base-dutch-cased': 5_12,
}
A__ : Optional[int] = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = BertTokenizer
def __init__( self : Any, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Any=None, lowerCamelCase : Tuple=True, lowerCamelCase : Dict="[UNK]", lowerCamelCase : Any="[SEP]", lowerCamelCase : List[Any]="[PAD]", lowerCamelCase : Optional[Any]="[CLS]", lowerCamelCase : Dict="[MASK]", lowerCamelCase : List[Any]=True, lowerCamelCase : Tuple=None, **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, tokenize_chinese_chars=lowerCamelCase, strip_accents=lowerCamelCase, **lowerCamelCase, )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''', lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''', lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''', lowerCamelCase ) != tokenize_chinese_chars
):
lowercase__ = getattr(lowerCamelCase, normalizer_state.pop('''type''' ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**lowerCamelCase )
lowercase__ = do_lower_case
def lowercase__ ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : Dict=None ):
'''simple docstring'''
lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : List[Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
lowercase__ = self._tokenizer.model.save(lowerCamelCase, name=lowerCamelCase )
return tuple(lowerCamelCase )
| 671 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_00_00
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def map ( dataset , **kwargs ):
    '''simple docstring'''
    lowercase__ = dataset.map(**kwargs )
@get_duration
def filter ( dataset , **kwargs ):
    '''simple docstring'''
    lowercase__ = dataset.filter(**kwargs )
def benchmark_map_filter ( ):
    '''simple docstring'''
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , '''dataset.arrow''' ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples['''text'''] )
        times['''map identity'''] = map(dataset )
        times['''map identity batched'''] = map(dataset , batched=True )
        times['''map no-op batched'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''numpy''' ):
            times['''map no-op batched numpy'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''pandas''' ):
            times['''map no-op batched pandas'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
            times['''map no-op batched pytorch'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
            times['''map no-op batched tensorflow'''] = map(dataset , function=lambda x : None , batched=True )
        times['''map fast-tokenizer batched'''] = map(dataset , function=tokenize , batched=True )
        times['''filter'''] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH , '''wb''' ) as f:
        f.write(json.dumps(times ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
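    # The collected timings are written to RESULTS_FILE_PATH, a .json file stored
    # in a "results" folder next to this script (one entry per benchmarked call).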
| 671 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A__ : Tuple = {
'configuration_chinese_clip': [
'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ChineseCLIPConfig',
'ChineseCLIPOnnxConfig',
'ChineseCLIPTextConfig',
'ChineseCLIPVisionConfig',
],
'processing_chinese_clip': ['ChineseCLIPProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = ['ChineseCLIPFeatureExtractor']
A__ : List[Any] = ['ChineseCLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = [
'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ChineseCLIPModel',
'ChineseCLIPPreTrainedModel',
'ChineseCLIPTextModel',
'ChineseCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
A__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
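# With the lazy structure above, importing this package is cheap: accessing a
# symbol such as ChineseCLIPModel triggers the torch-backed submodule import
# only on first attribute lookup.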
| 671 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : str = "", lowerCamelCase : bool = False ):
'''simple docstring'''
# Mapping from the first character of the prefix of the node
lowercase__ = {}
# A node will be a leaf if the tree contains its word
lowercase__ = is_leaf
lowercase__ = prefix
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = 0
for q, w in zip(self.prefix, lowerCamelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def lowercase__ ( self : Optional[int], lowerCamelCase : list[str] ):
'''simple docstring'''
for word in words:
self.insert(lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : str ):
'''simple docstring'''
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
lowercase__ = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowercase__ = RadixNode(prefix=lowerCamelCase, is_leaf=lowerCamelCase )
else:
lowercase__ = self.nodes[word[0]]
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCamelCase )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowercase__ = remaining_prefix
lowercase__ = self.nodes[matching_string[0]]
lowercase__ = RadixNode(lowerCamelCase, lowerCamelCase )
lowercase__ = aux_node
if remaining_word == "":
lowercase__ = True
else:
self.nodes[matching_string[0]].insert(lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.nodes.get(word[0], lowerCamelCase )
if not incoming_node:
return False
else:
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(lowerCamelCase )
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.nodes.get(word[0], lowerCamelCase )
if not incoming_node:
return False
else:
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCamelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowercase__ = list(self.nodes.values() )[0]
lowercase__ = merging_node.is_leaf
self.prefix += merging_node.prefix
lowercase__ = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowercase__ = False
# If there is 1 edge, we merge it with its child
else:
lowercase__ = list(incoming_node.nodes.values() )[0]
lowercase__ = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowercase__ = merging_node.nodes
return True
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int = 0 ):
'''simple docstring'''
if self.prefix != "":
print('''-''' * height, self.prefix, ''' (leaf)''' if self.is_leaf else '''''' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def test_trie ( ):
'''simple docstring'''
lowercase__ = '''banana bananas bandana band apple all beast'''.split()
lowercase__ = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
assert not root.find('''bandanas''' )
assert not root.find('''apps''' )
root.delete('''all''' )
assert not root.find('''all''' )
root.delete('''banana''' )
assert not root.find('''banana''' )
assert root.find('''bananas''' )
return True
def pytests ( ):
'''simple docstring'''
assert test_trie()
def main ( ):
'''simple docstring'''
lowercase__ = RadixNode()
lowercase__ = '''banana bananas bandanas bandana band apple all beast'''.split()
    root.insert_many(words )
    print('''Words:''' , words )
print('''Tree:''' )
root.print_tree()
if __name__ == "__main__":
main()
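    # Worked example of `match` above: a node with prefix "banana" matched against
    # the word "bananas" returns ("banana", "", "s"): the common part, no leftover
    # node prefix, and a remaining "s" that insert handles as Case 3.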
| 671 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any], lowerCamelCase : Any ):
'''simple docstring'''
lowercase__ = data
lowercase__ = None
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : str ):
'''simple docstring'''
lowercase__ = None
lowercase__ = None
def __iter__( self : Dict ):
'''simple docstring'''
lowercase__ = self.head
while self.head:
yield node.data
lowercase__ = node.next
if node == self.head:
break
def __len__( self : Tuple ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self : List[Any] ):
'''simple docstring'''
return "->".join(str(lowerCamelCase ) for item in iter(self ) )
def lowercase__ ( self : int, lowerCamelCase : Any ):
'''simple docstring'''
self.insert_nth(len(self ), lowerCamelCase )
def lowercase__ ( self : List[Any], lowerCamelCase : Any ):
'''simple docstring'''
self.insert_nth(0, lowerCamelCase )
def lowercase__ ( self : Optional[int], lowerCamelCase : int, lowerCamelCase : Any ):
'''simple docstring'''
if index < 0 or index > len(self ):
raise IndexError('''list index out of range.''' )
lowercase__ = Node(lowerCamelCase )
if self.head is None:
lowercase__ = new_node # first node points itself
lowercase__ = lowercase__ = new_node
elif index == 0: # insert at head
lowercase__ = self.head
lowercase__ = lowercase__ = new_node
else:
lowercase__ = self.head
for _ in range(index - 1 ):
lowercase__ = temp.next
lowercase__ = temp.next
lowercase__ = new_node
if index == len(self ) - 1: # insert at tail
lowercase__ = new_node
def lowercase__ ( self : int ):
'''simple docstring'''
return self.delete_nth(0 )
def lowercase__ ( self : Dict ):
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int = 0 ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise IndexError('''list index out of range.''' )
lowercase__ = self.head
if self.head == self.tail: # just one node
lowercase__ = lowercase__ = None
elif index == 0: # delete head node
lowercase__ = self.tail.next.next
lowercase__ = self.head.next
else:
lowercase__ = self.head
for _ in range(index - 1 ):
lowercase__ = temp.next
lowercase__ = temp.next
lowercase__ = temp.next.next
if index == len(self ) - 1: # delete at tail
lowercase__ = temp
return delete_node.data
def lowercase__ ( self : Tuple ):
'''simple docstring'''
return len(self ) == 0
def test_circular_linked_list ( ):
'''simple docstring'''
lowercase__ = CircularLinkedList()
    assert len(circular_linked_list ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
        assert len(circular_linked_list ) == i
        circular_linked_list.insert_nth(i , i + 1 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    circular_linked_list.insert_tail(6 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(0 , 7 ) )
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3
    circular_linked_list.insert_nth(2 , 3 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = ViTImageProcessor if is_vision_available() else None
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = (3, 32, 128)
lowercase__ = tempfile.mkdtemp()
# fmt: off
lowercase__ = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
lowercase__ = dict(zip(lowerCamelCase, range(len(lowerCamelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase ) + '''\n''' )
lowercase__ = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
lowercase__ = os.path.join(self.tmpdirname, lowerCamelCase )
with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
json.dump(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : int, **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : str, **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )
lowercase__ = Image.fromarray(np.moveaxis(lowerCamelCase, 0, -1 ) )
return image_input
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
lowercase__ = self.get_image_processor(do_normalize=lowerCamelCase, padding_value=1.0 )
lowercase__ = MgpstrProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=lowerCamelCase, padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(lowerCamelCase, return_tensors='''np''' )
lowercase__ = processor(images=lowerCamelCase, return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = processor(text=lowerCamelCase )
lowercase__ = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.char_decode(lowerCamelCase )
lowercase__ = tokenizer.batch_decode(lowerCamelCase )
lowercase__ = [seq.replace(''' ''', '''''' ) for seq in decoded_tok]
self.assertListEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = None
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = torch.randn(1, 27, 38 )
lowercase__ = torch.randn(1, 27, 50_257 )
lowercase__ = torch.randn(1, 27, 30_522 )
lowercase__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ), ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
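# The tests above all exercise one pattern: serialize a composite processor to a
# directory, reload it, and check the components survive the round trip. A
# minimal framework-free sketch of that pattern (class and file names are
# illustrative, not the actual transformers API; json/os/tempfile are imported
# at the top of this file):
class ToyProcessor:
    """Bundles a vocab (tokenizer stand-in) and an image size (image-processor stand-in)."""

    def __init__(self, vocab, image_size):
        self.vocab = vocab
        self.image_size = image_size

    def save_pretrained(self, directory):
        with open(os.path.join(directory, "vocab.json"), "w") as f:
            json.dump(self.vocab, f)
        with open(os.path.join(directory, "preprocessor_config.json"), "w") as f:
            json.dump({"image_size": self.image_size}, f)

    @classmethod
    def from_pretrained(cls, directory):
        with open(os.path.join(directory, "vocab.json")) as f:
            vocab = json.load(f)
        with open(os.path.join(directory, "preprocessor_config.json")) as f:
            config = json.load(f)
        return cls(vocab, config["image_size"])


def _example_round_trip():
    tmp = tempfile.mkdtemp()
    original = ToyProcessor({"[GO]": 0, "a": 1}, {"height": 32, "width": 128})
    original.save_pretrained(tmp)
    reloaded = ToyProcessor.from_pretrained(tmp)
    assert reloaded.vocab == original.vocab
    assert reloaded.image_size == original.image_size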
| 671 | 1 |
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = len(lowerCamelCase_ )
print('''The following activities are selected:''' )
# The first activity is always selected
lowercase__ = 0
print(lowerCamelCase_ , end=''',''' )
# Consider rest of the activities
for j in range(lowerCamelCase_ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(lowerCamelCase_ , end=''',''' )
lowercase__ = j
if __name__ == "__main__":
import doctest
doctest.testmod()
A__ : str = [1, 3, 0, 5, 8, 5]
A__ : int = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
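# The same greedy selection, restated to return the chosen indices instead of
# printing them (assumes, like the original, that activities are pre-sorted by
# finish time; the function name is mine):
def select_activities(start_times, finish_times):
    selected = [0]  # the first activity is always chosen
    last = 0
    for j in range(1, len(finish_times)):
        # pick an activity only if it starts at or after the last chosen finish
        if start_times[j] >= finish_times[last]:
            selected.append(j)
            last = j
    return selected


assert select_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]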
| 671 |
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
lowercase__ = _modexpt(lowerCamelCase_ , exponent // 2 , lowerCamelCase_ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowerCamelCase_ , exponent - 1 , lowerCamelCase_ )) % modulo_value
def a ( lowerCamelCase_ = 1777 , lowerCamelCase_ = 1855 , lowerCamelCase_ = 8 ):
'''simple docstring'''
lowercase__ = base
for _ in range(1 , lowerCamelCase_ ):
lowercase__ = _modexpt(lowerCamelCase_ , lowerCamelCase_ , 10**digits )
return result
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowercase__ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int, lowerCamelCase : List[str], lowerCamelCase : Dict ):
'''simple docstring'''
lowercase__ = TextaTextGenerationPipeline(model=lowerCamelCase, tokenizer=lowerCamelCase )
return generator, ["Something to write", "Something else"]
def lowercase__ ( self : int, lowerCamelCase : Tuple, lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowercase__ = generator('''Something there''' )
self.assertEqual(lowerCamelCase, [{'''generated_text''': ANY(lowerCamelCase )}] )
# These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
lowercase__ = generator(['''This is great !''', '''Something else'''], num_return_sequences=2, do_sample=lowerCamelCase )
self.assertEqual(
lowerCamelCase, [
[{'''generated_text''': ANY(lowerCamelCase )}, {'''generated_text''': ANY(lowerCamelCase )}],
[{'''generated_text''': ANY(lowerCamelCase )}, {'''generated_text''': ANY(lowerCamelCase )}],
], )
lowercase__ = generator(
['''This is great !''', '''Something else'''], num_return_sequences=2, batch_size=2, do_sample=lowerCamelCase )
self.assertEqual(
lowerCamelCase, [
[{'''generated_text''': ANY(lowerCamelCase )}, {'''generated_text''': ANY(lowerCamelCase )}],
[{'''generated_text''': ANY(lowerCamelCase )}, {'''generated_text''': ANY(lowerCamelCase )}],
], )
with self.assertRaises(lowerCamelCase ):
generator(4 )
@require_torch
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = pipeline('''text2text-generation''', model='''patrickvonplaten/t5-tiny-random''', framework='''pt''' )
# do_sample=False necessary for reproducibility
lowercase__ = generator('''Something there''', do_sample=lowerCamelCase )
self.assertEqual(lowerCamelCase, [{'''generated_text''': ''''''}] )
lowercase__ = 3
lowercase__ = generator(
'''Something there''', num_return_sequences=lowerCamelCase, num_beams=lowerCamelCase, )
lowercase__ = [
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(lowerCamelCase, lowerCamelCase )
lowercase__ = generator('''This is a test''', do_sample=lowerCamelCase, num_return_sequences=2, return_tensors=lowerCamelCase )
self.assertEqual(
lowerCamelCase, [
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
], )
lowercase__ = generator.model.config.eos_token_id
lowercase__ = '''<pad>'''
lowercase__ = generator(
['''This is a test''', '''This is a second test'''], do_sample=lowerCamelCase, num_return_sequences=2, batch_size=2, return_tensors=lowerCamelCase, )
self.assertEqual(
lowerCamelCase, [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
], )
@require_tf
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = pipeline('''text2text-generation''', model='''patrickvonplaten/t5-tiny-random''', framework='''tf''' )
# do_sample=False necessary for reproducibility
lowercase__ = generator('''Something there''', do_sample=lowerCamelCase )
self.assertEqual(lowerCamelCase, [{'''generated_text''': ''''''}] )
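# Typical interactive usage of the pipeline exercised above, wrapped in a helper
# so it does not run on import (the checkpoint is the tiny one used in the tests;
# fetching it requires network access):
def _example_text2text_usage():
    generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
    # deterministic decoding, as in the do_sample=False tests above
    print(generator("Something there", do_sample=False))
    # beam search with several returned candidates per input
    print(generator("This is a test", num_beams=3, num_return_sequences=3))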
| 671 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
A__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : WhisperForConditionalGeneration, lowerCamelCase : WhisperProcessor, lowerCamelCase : AutoencoderKL, lowerCamelCase : CLIPTextModel, lowerCamelCase : CLIPTokenizer, lowerCamelCase : UNetaDConditionModel, lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], lowerCamelCase : StableDiffusionSafetyChecker, lowerCamelCase : CLIPImageProcessor, ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=lowerCamelCase, speech_processor=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, unet=lowerCamelCase, scheduler=lowerCamelCase, feature_extractor=lowerCamelCase, )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
lowercase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase )
@torch.no_grad()
def __call__( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[Any]=16_000, lowerCamelCase : int = 512, lowerCamelCase : int = 512, lowerCamelCase : int = 50, lowerCamelCase : float = 7.5, lowerCamelCase : Optional[Union[str, List[str]]] = None, lowerCamelCase : Optional[int] = 1, lowerCamelCase : float = 0.0, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : Optional[torch.FloatTensor] = None, lowerCamelCase : Optional[str] = "pil", lowerCamelCase : bool = True, lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, lowerCamelCase : int = 1, **lowerCamelCase : Optional[Any], ):
'''simple docstring'''
lowercase__ = self.speech_processor.feature_extractor(
lowerCamelCase, return_tensors='''pt''', sampling_rate=lowerCamelCase ).input_features.to(self.device )
lowercase__ = self.speech_model.generate(lowerCamelCase, max_length=480_000 )
lowercase__ = self.speech_processor.tokenizer.batch_decode(lowerCamelCase, skip_special_tokens=lowerCamelCase, normalize=lowerCamelCase )[
0
]
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = 1
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = len(lowerCamelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase, lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(lowerCamelCase )}.""" )
# get prompt text embeddings
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=self.tokenizer.model_max_length, return_tensors='''pt''', )
lowercase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
lowercase__ = text_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt, lowerCamelCase, -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = [''''''] * batch_size
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !="""
F""" {type(lowerCamelCase )}.""" )
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
lowercase__ = negative_prompt
lowercase__ = text_input_ids.shape[-1]
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=lowerCamelCase, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = uncond_embeddings.shape[1]
lowercase__ = uncond_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt, lowerCamelCase, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='''cpu''', dtype=lowerCamelCase ).to(
self.device )
else:
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more efficient to move all timesteps to the correct device beforehand
lowercase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase )
# predict the noise residual
lowercase__ = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = 1 / 0.18215 * latents
lowercase__ = self.vae.decode(lowerCamelCase ).sample
lowercase__ = (image / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase )
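# The heart of the denoising loop above is classifier-free guidance: the UNet is
# run on a doubled batch (unconditional + text-conditioned) and the two noise
# predictions are blended. A minimal standalone sketch of just that blend
# (shapes are illustrative):
def _classifier_free_guidance(noise_pred_uncond, noise_pred_text, guidance_scale):
    # guidance_scale == 1.0 recovers the purely conditional prediction;
    # larger values push the sample harder toward the text prompt
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)


assert _classifier_free_guidance(
    torch.zeros(1, 4, 64, 64), torch.ones(1, 4, 64, 64), guidance_scale=7.5
).shape == (1, 4, 64, 64)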
| 671 | 1 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
# we need a list, not a string, so split the comma-separated input into a list
lowercase__ = arr.split(''',''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = [int(self.array[0] )] * len(self.array )
lowercase__ = [int(self.array[0] )] * len(self.array )
for i in range(1, len(self.array ) ):
lowercase__ = max(
int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
lowercase__ = max(sum_value[i], rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
A__ : Dict = input('please input some numbers:')
A__ : Union[str, Any] = SubArray(whole_array)
A__ : int = array.solve_sub_array()
print(('the result is:', re))
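# The two rolling arrays above implement the classic maximum-sum contiguous
# subarray recurrence; Kadane's algorithm computes the same answer in O(n) time
# and O(1) space (a standalone sketch, names are mine):
def _max_subarray_sum(numbers):
    # best_ending_here is the best sum of a subarray ending at the current index;
    # best_so_far is the maximum of that quantity over all indices
    best_ending_here = best_so_far = numbers[0]
    for value in numbers[1:]:
        best_ending_here = max(value, best_ending_here + value)
        best_so_far = max(best_so_far, best_ending_here)
    return best_so_far


assert _max_subarray_sum([1, -2, 4, -1, 3]) == 6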
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
destination_vertex: int
weight: int
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : str, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = [[] for _ in range(lowerCamelCase )]
lowercase__ = size
def __getitem__( self : Optional[Any], lowerCamelCase : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def lowercase__ ( self : str ):
'''simple docstring'''
return self._size
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(lowerCamelCase, lowerCamelCase ) )
def lowercase__ ( self : Optional[int], lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = deque([start_vertex] )
lowercase__ = [None] * self.size
lowercase__ = 0
while queue:
lowercase__ = queue.popleft()
lowercase__ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowercase__ = current_distance + edge.weight
lowercase__ = distances[edge.destination_vertex]
if (
isinstance(lowerCamelCase, lowerCamelCase )
and new_distance >= dest_vertex_distance
):
continue
lowercase__ = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
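# A standalone restatement of the 0-1 BFS trick above: weight-0 edges jump to the
# front of the deque, weight-1 edges go to the back, so vertices are settled in
# nondecreasing distance order without a priority queue (names are mine):
def zero_one_bfs(adjacency: list[list[tuple[int, int]]], start: int, finish: int) -> int:
    distances: list[int | None] = [None] * len(adjacency)
    distances[start] = 0
    queue = deque([start])
    while queue:
        vertex = queue.popleft()
        for destination, weight in adjacency[vertex]:
            new_distance = distances[vertex] + weight
            old_distance = distances[destination]
            if old_distance is not None and new_distance >= old_distance:
                continue
            distances[destination] = new_distance
            if weight == 0:
                queue.appendleft(destination)  # weight-0 edges jump the queue
            else:
                queue.append(destination)
    if distances[finish] is None:
        raise ValueError("No path from start to finish.")
    return distances[finish]


assert zero_one_bfs([[(1, 0), (2, 1)], [(2, 1)], []], 0, 2) == 1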
| 671 | 1 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
A__ : Dict = 50_00_00
A__ , A__ : str = os.path.split(__file__)
A__ : Optional[Any] = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def a ( lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dataset.map(**lowerCamelCase_ )
@get_duration
def a ( lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dataset.filter(**lowerCamelCase_ )
def a ( ):
'''simple docstring'''
lowercase__ = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
lowercase__ = generate_example_dataset(
os.path.join(lowerCamelCase_ , '''dataset.arrow''' ) , lowerCamelCase_ , num_examples=lowerCamelCase_ )
lowercase__ = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowerCamelCase_ )
def tokenize(lowerCamelCase_ ):
return tokenizer(examples['''text'''] )
lowercase__ = map(lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , batched=lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''numpy''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''pandas''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , function=lowerCamelCase_ , batched=lowerCamelCase_ )
lowercase__ = filter(lowerCamelCase_ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowerCamelCase_ , '''wb''' ) as f:
f.write(json.dumps(lowerCamelCase_ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
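# The get_duration decorator comes from the benchmark's local utils module and is
# not shown here; a minimal sketch of what such a timing decorator typically
# looks like (my reconstruction, not the actual utils code):
def _sketch_get_duration(func):
    import functools
    import time

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start  # report wall-clock seconds, not the result

    return wrapper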
| 671 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
# we need a list, not a string, so split the comma-separated input into a list
lowercase__ = arr.split(''',''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = [int(self.array[0] )] * len(self.array )
lowercase__ = [int(self.array[0] )] * len(self.array )
for i in range(1, len(self.array ) ):
lowercase__ = max(
int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
lowercase__ = max(sum_value[i], rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
A__ : Dict = input('please input some numbers:')
A__ : Union[str, Any] = SubArray(whole_array)
A__ : int = array.solve_sub_array()
print(('the result is:', re))
| 671 | 1 |
import os
A__ : List[str] = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 1_00, 'D': 5_00, 'M': 10_00}
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = 0
while index < len(lowerCamelCase_ ) - 1:
lowercase__ = SYMBOLS[numerals[index]]
lowercase__ = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = ''''''
lowercase__ = num // 1000
numerals += m_count * "M"
num %= 1000
lowercase__ = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
lowercase__ = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def a ( lowerCamelCase_ = "/p089_roman.txt" ):
'''simple docstring'''
lowercase__ = 0
with open(os.path.dirname(lowerCamelCase_ ) + roman_numerals_filename ) as filea:
lowercase__ = filea.readlines()
for line in lines:
lowercase__ = line.strip()
lowercase__ = parse_roman_numerals(lowerCamelCase_ )
lowercase__ = generate_roman_numerals(lowerCamelCase_ )
savings += len(lowerCamelCase_ ) - len(lowerCamelCase_ )
return savings
if __name__ == "__main__":
print(F"{solution() = }")
| 671 |
from itertools import count
def a ( lowerCamelCase_ = 50 ):
'''simple docstring'''
lowercase__ = [1] * min_block_length
for n in count(lowerCamelCase_ ):
fill_count_functions.append(1 )
for block_length in range(lowerCamelCase_ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 100_0000:
break
return n
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
# Lint as: python3
import itertools
import os
import re
A__ : List[str] = re.compile(r'([A-Z]+)([A-Z][a-z])')
A__ : Optional[Any] = re.compile(r'([a-z\d])([A-Z])')
A__ : int = re.compile(r'(?<!_)_(?!_)')
A__ : int = re.compile(r'(_{2,})')
A__ : List[str] = r'^\w+(\.\w+)*$'
A__ : Any = r'<>:/\|?*'
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _uppercase_uppercase_re.sub(r'''\1_\2''' , lowerCamelCase_ )
lowercase__ = _lowercase_uppercase_re.sub(r'''\1_\2''' , lowerCamelCase_ )
return name.lower()
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _single_underscore_re.split(lowerCamelCase_ )
lowercase__ = [_multiple_underscores_re.split(lowerCamelCase_ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowerCamelCase_ ) if n != '''''' )
def a ( lowerCamelCase_ ):
'''simple docstring'''
if os.path.basename(lowerCamelCase_ ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
return camelcase_to_snakecase(lowerCamelCase_ )
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if os.path.basename(lowerCamelCase_ ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
if not re.match(_split_re , lowerCamelCase_ ):
raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""" )
return F"""{filename_prefix_for_name(lowerCamelCase_ )}-{split}"""
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = filename_prefix_for_split(lowerCamelCase_ , lowerCamelCase_ )
if filetype_suffix:
prefix += F""".{filetype_suffix}"""
lowercase__ = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
return F"""{filepath}*"""
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = filename_prefix_for_split(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if shard_lengths:
lowercase__ = len(lowerCamelCase_ )
lowercase__ = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(lowerCamelCase_ )]
if filetype_suffix:
lowercase__ = [filename + F""".{filetype_suffix}""" for filename in filenames]
return filenames
else:
lowercase__ = prefix
if filetype_suffix:
filename += F""".{filetype_suffix}"""
return [filename]
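# The two regexes implement the usual two-pass CamelCase -> snake_case trick:
# first split an uppercase run from a following capitalized word, then split a
# lowercase letter or digit from a following capital. Worked examples:
#
#   camelcase_to_snakecase("HTMLParser") -> "html_parser"
#       _uppercase_uppercase_re: "HTMLParser" -> "HTML_Parser", then .lower()
#   camelcase_to_snakecase("SquadV2") -> "squad_v2"
#       _lowercase_uppercase_re: "SquadV2" -> "Squad_V2", then .lower()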
| 671 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A__ : Tuple = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[int], lowerCamelCase : int=64, lowerCamelCase : Union[str, Any]=48_000, lowerCamelCase : str=480, lowerCamelCase : Tuple=10, lowerCamelCase : List[Any]=1_024, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[Any]=False, lowerCamelCase : float = 0, lowerCamelCase : float = 14_000, lowerCamelCase : int = None, lowerCamelCase : str = "fusion", lowerCamelCase : str = "repeatpad", **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase, sampling_rate=lowerCamelCase, padding_value=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
lowercase__ = top_db
lowercase__ = truncation
lowercase__ = padding
lowercase__ = fft_window_size
lowercase__ = (fft_window_size >> 1) + 1
lowercase__ = hop_length
lowercase__ = max_length_s
lowercase__ = max_length_s * sampling_rate
lowercase__ = sampling_rate
lowercase__ = frequency_min
lowercase__ = frequency_max
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm=lowerCamelCase, mel_scale='''htk''', )
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm='''slaney''', mel_scale='''slaney''', )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowercase__ ( self : Optional[int], lowerCamelCase : np.array, lowerCamelCase : Optional[np.array] = None ):
'''simple docstring'''
lowercase__ = spectrogram(
lowerCamelCase, window_function(self.fft_window_size, '''hann''' ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=lowerCamelCase, log_mel='''dB''', )
return log_mel_spectrogram.T
def lowercase__ ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
# randomly choose index for each part
lowercase__ = np.random.choice(ranges[0] )
lowercase__ = np.random.choice(ranges[1] )
lowercase__ = np.random.choice(ranges[2] )
lowercase__ = mel[idx_front : idx_front + chunk_frames, :]
lowercase__ = mel[idx_middle : idx_middle + chunk_frames, :]
lowercase__ = mel[idx_back : idx_back + chunk_frames, :]
lowercase__ = torch.tensor(mel[None, None, :] )
lowercase__ = torch.nn.functional.interpolate(
lowerCamelCase, size=[chunk_frames, 64], mode='''bilinear''', align_corners=lowerCamelCase )
lowercase__ = mel_shrink[0][0].numpy()
lowercase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
return mel_fusion
def lowercase__ ( self : List[str], lowerCamelCase : np.array, lowerCamelCase : int, lowerCamelCase : Dict, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowercase__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowercase__ = len(lowerCamelCase ) - max_length
lowercase__ = np.random.randint(0, overflow + 1 )
lowercase__ = waveform[idx : idx + max_length]
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowercase__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowercase__ = np.stack([mel, mel, mel, mel], axis=0 )
lowercase__ = False
else:
lowercase__ = self._random_mel_fusion(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
lowercase__ = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, lowerCamelCase ) )
lowercase__ = np.pad(lowerCamelCase, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0 )
if truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
else:
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any], lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], lowerCamelCase : str = None, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, **lowerCamelCase : List[str], ):
'''simple docstring'''
lowercase__ = truncation if truncation is not None else self.truncation
lowercase__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase__ = isinstance(lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase, np.ndarray ):
lowercase__ = np.asarray(lowerCamelCase, dtype=np.floataa )
elif isinstance(lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray(lowerCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
lowercase__ = [
self._get_input_mel(lowerCamelCase, max_length if max_length else self.nb_max_samples, lowerCamelCase, lowerCamelCase )
for waveform in raw_speech
]
lowercase__ = []
lowercase__ = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase )
is_longer.append(lowerCamelCase )
if truncation == "fusion" and sum(lowerCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowercase__ = np.random.randint(0, len(lowerCamelCase ) )
lowercase__ = True
if isinstance(input_mel[0], lowerCamelCase ):
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowercase__ = [[longer] for longer in is_longer]
lowercase__ = {'''input_features''': input_mel, '''is_longer''': is_longer}
lowercase__ = BatchFeature(lowerCamelCase )
if return_tensors is not None:
lowercase__ = input_features.convert_to_tensors(lowerCamelCase )
return input_features
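# Of the two padding modes above, "repeatpad" tiles a short waveform and then
# zero-pads the remainder out to max_length. A small standalone sketch of just
# that step (names are mine):
def _repeat_pad(waveform: np.ndarray, max_length: int) -> np.ndarray:
    n_repeat = max_length // len(waveform)
    tiled = np.tile(waveform, n_repeat)  # whole copies of the waveform
    return np.pad(tiled, (0, max_length - len(tiled)), mode="constant", constant_values=0)


assert _repeat_pad(np.array([1.0, 2.0, 3.0]), 8).tolist() == [1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 0.0, 0.0]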
| 671 | 1 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : int, lowerCamelCase : str = "▁", lowerCamelCase : bool = True, lowerCamelCase : Union[str, AddedToken] = "<unk>", lowerCamelCase : Union[str, AddedToken] = "</s>", lowerCamelCase : Union[str, AddedToken] = "<pad>", ):
'''simple docstring'''
lowercase__ = {
'''pad''': {'''id''': 0, '''token''': pad_token},
'''eos''': {'''id''': 1, '''token''': eos_token},
'''unk''': {'''id''': 2, '''token''': unk_token},
}
lowercase__ = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
lowercase__ = token_dict['''token''']
lowercase__ = Tokenizer(Unigram() )
lowercase__ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(''' {2,}''' ), ''' ''' ),
normalizers.Lowercase(),
] )
lowercase__ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=lowerCamelCase, add_prefix_space=lowerCamelCase ),
pre_tokenizers.Digits(individual_digits=lowerCamelCase ),
pre_tokenizers.Punctuation(),
] )
lowercase__ = decoders.Metaspace(replacement=lowerCamelCase, add_prefix_space=lowerCamelCase )
lowercase__ = TemplateProcessing(
single=F"""$A {self.special_tokens['eos']['token']}""", special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])], )
lowercase__ = {
'''model''': '''SentencePieceUnigram''',
'''replacement''': replacement,
'''add_prefix_space''': add_prefix_space,
}
super().__init__(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Any, lowerCamelCase : Union[str, List[str]], lowerCamelCase : int = 8_000, lowerCamelCase : bool = True, ):
'''simple docstring'''
lowercase__ = trainers.UnigramTrainer(
vocab_size=lowerCamelCase, special_tokens=self.special_tokens_list, show_progress=lowerCamelCase, )
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = [files]
self._tokenizer.train(lowerCamelCase, trainer=lowerCamelCase )
self.add_unk_id()
def lowercase__ ( self : Any, lowerCamelCase : Union[Iterator[str], Iterator[Iterator[str]]], lowerCamelCase : int = 8_000, lowerCamelCase : bool = True, ):
'''simple docstring'''
lowercase__ = trainers.UnigramTrainer(
vocab_size=lowerCamelCase, special_tokens=self.special_tokens_list, show_progress=lowerCamelCase, )
self._tokenizer.train_from_iterator(lowerCamelCase, trainer=lowerCamelCase )
self.add_unk_id()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = json.loads(self._tokenizer.to_str() )
lowercase__ = self.special_tokens['''unk''']['''id''']
lowercase__ = Tokenizer.from_str(json.dumps(lowerCamelCase ) )
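# A minimal usage sketch of the same tokenizers primitives the class above is
# built on: train a Unigram model on an in-memory corpus, then encode (the
# corpus and vocab size are illustrative; wrapped so it does not run on import):
def _example_unigram_training():
    tokenizer = Tokenizer(Unigram())
    trainer = trainers.UnigramTrainer(
        vocab_size=100, special_tokens=["<pad>", "</s>", "<unk>"], show_progress=False
    )
    tokenizer.train_from_iterator(["the quick brown fox", "jumps over the lazy dog"], trainer=trainer)
    return tokenizer.encode("the quick dog").tokens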
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
data: int
left: Node | None = None
right: Node | None = None
def a ( ):
'''simple docstring'''
tree = Node(1 )
tree.left = Node(2 )
tree.right = Node(3 )
tree.left.left = Node(4 )
tree.left.right = Node(5 )
return tree
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ ):
'''simple docstring'''
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(lowerCamelCase_ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 0
return output
def a ( ): # Main function for testing.
'''simple docstring'''
lowercase__ = make_tree()
print(F"""In-order Traversal: {inorder(lowerCamelCase_ )}""" )
print(F"""Pre-order Traversal: {preorder(lowerCamelCase_ )}""" )
print(F"""Post-order Traversal: {postorder(lowerCamelCase_ )}""" , '''\n''' )
print(F"""Height of Tree: {height(lowerCamelCase_ )}""" , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(lowerCamelCase_ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(lowerCamelCase_ ) + 1 ):
print(F"""Level {level}:""" , get_nodes_from_left_to_right(lowerCamelCase_ , level=lowerCamelCase_ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
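# Expected output for the sample tree built above (1 at the root, 2 and 3 as its
# children, 4 and 5 under 2):
#   preorder    -> [1, 2, 4, 5, 3]
#   inorder     -> [4, 2, 5, 1, 3]
#   postorder   -> [4, 5, 2, 3, 1]
#   level_order -> [1, 2, 3, 4, 5]
#   zigzag      -> [[1], [3, 2], [4, 5]]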
| 671 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Tuple, lowerCamelCase : Dict, lowerCamelCase : Union[str, Any]=13, lowerCamelCase : Optional[Any]=7, lowerCamelCase : Union[str, Any]=True, lowerCamelCase : List[Any]=True, lowerCamelCase : Optional[int]=False, lowerCamelCase : Optional[int]=True, lowerCamelCase : Dict=99, lowerCamelCase : str=32, lowerCamelCase : Optional[int]=5, lowerCamelCase : Any=4, lowerCamelCase : List[str]=64, lowerCamelCase : Tuple="gelu", lowerCamelCase : List[str]=0.1, lowerCamelCase : List[Any]=0.1, lowerCamelCase : Optional[int]=512, lowerCamelCase : Any=16, lowerCamelCase : List[Any]=2, lowerCamelCase : Any=0.02, lowerCamelCase : str=3, lowerCamelCase : Dict=4, lowerCamelCase : List[str]=None, lowerCamelCase : Union[str, Any]=2, lowerCamelCase : Optional[int]=2, lowerCamelCase : List[str]=2, lowerCamelCase : Optional[Any]=2, lowerCamelCase : Optional[Any]=4, lowerCamelCase : List[str]=1, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = q_groups
lowercase__ = k_groups
lowercase__ = v_groups
lowercase__ = post_attention_groups
lowercase__ = intermediate_groups
lowercase__ = output_groups
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase__ = ids_tensor([self.batch_size], self.num_choices )
lowercase__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : int ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, )
def lowercase__ ( self : Optional[Any], lowerCamelCase : int, lowerCamelCase : Dict, lowerCamelCase : List[Any], lowerCamelCase : List[str], lowerCamelCase : Optional[int], lowerCamelCase : Dict ):
'''simple docstring'''
lowercase__ = SqueezeBertModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, lowerCamelCase )
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : int, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Tuple, lowerCamelCase : str, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = SqueezeBertForMaskedLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[str], lowerCamelCase : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : Tuple, lowerCamelCase : str, lowerCamelCase : int, lowerCamelCase : List[str] ):
'''simple docstring'''
lowercase__ = SqueezeBertForQuestionAnswering(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(
lowerCamelCase, attention_mask=lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase__ ( self : List[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Optional[Any], lowerCamelCase : List[Any], lowerCamelCase : Optional[Any], lowerCamelCase : Dict ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = SqueezeBertForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase__ ( self : List[Any], lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict, lowerCamelCase : List[Any], lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = SqueezeBertForTokenClassification(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Dict, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.num_choices
lowercase__ = SqueezeBertForMultipleChoice(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase__ = model(
lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowercase__ = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = True
lowercase__ = False
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = SqueezeBertModelTester(self )
lowercase__ = ConfigTester(self, config_class=lowerCamelCase, dim=37 )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowerCamelCase )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowerCamelCase )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowerCamelCase )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowerCamelCase )
@slow
def lowercase__ ( self : int ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = SqueezeBertModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@require_sentencepiece
@require_tokenizers
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
lowercase__ = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] )
lowercase__ = model(lowerCamelCase )[0]
lowercase__ = torch.Size((1, 3) )
self.assertEqual(output.shape, lowerCamelCase )
lowercase__ = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(lowerCamelCase, lowerCamelCase, atol=1E-4 ) )
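# Hedged note on the reference values above: squeezebert/squeezebert-mnli is a
# 3-way NLI head, hence the (1, 3) logits shape; the argmax of
# [0.6401, -0.0349, -0.6041] selects class 0.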
| 671 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = DistilBertTokenizer
lowercase__ = DistilBertTokenizerFast
lowercase__ = True
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
lowercase__ = tokenizer.encode('''sequence builders''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''multi-sequence build''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
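# Hedged sketch of the layout the two asserts above encode, assuming BERT-style
# special tokens:
#   single sequence: [CLS] sequence builders [SEP]
#   sequence pair:   [CLS] sequence builders [SEP] multi-sequence build [SEP]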
| 671 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[Any] = logging.get_logger(__name__)
A__ : List[Any] = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = """unispeech-sat"""
def __init__( self : Optional[int], lowerCamelCase : Any=32, lowerCamelCase : Optional[Any]=768, lowerCamelCase : List[str]=12, lowerCamelCase : List[Any]=12, lowerCamelCase : Optional[int]=3_072, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : str=0.1, lowerCamelCase : Union[str, Any]=0.1, lowerCamelCase : int=0.1, lowerCamelCase : int=0.0, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=0.1, lowerCamelCase : str=0.02, lowerCamelCase : Optional[Any]=1E-5, lowerCamelCase : Tuple="group", lowerCamelCase : List[str]="gelu", lowerCamelCase : int=(512, 512, 512, 512, 512, 512, 512), lowerCamelCase : Optional[Any]=(5, 2, 2, 2, 2, 2, 2), lowerCamelCase : Optional[Any]=(10, 3, 3, 3, 3, 2, 2), lowerCamelCase : Optional[int]=False, lowerCamelCase : Optional[Any]=128, lowerCamelCase : Optional[int]=16, lowerCamelCase : Optional[int]=False, lowerCamelCase : Dict=True, lowerCamelCase : Any=0.05, lowerCamelCase : Optional[Any]=10, lowerCamelCase : Any=2, lowerCamelCase : Dict=0.0, lowerCamelCase : Union[str, Any]=10, lowerCamelCase : Dict=0, lowerCamelCase : List[Any]=320, lowerCamelCase : Any=2, lowerCamelCase : List[Any]=0.1, lowerCamelCase : Any=100, lowerCamelCase : int=256, lowerCamelCase : Tuple=256, lowerCamelCase : Any=0.1, lowerCamelCase : Dict="mean", lowerCamelCase : Any=False, lowerCamelCase : Dict=False, lowerCamelCase : Optional[int]=256, lowerCamelCase : Optional[int]=(512, 512, 512, 512, 1_500), lowerCamelCase : Optional[Any]=(5, 3, 3, 1, 1), lowerCamelCase : Union[str, Any]=(1, 2, 3, 1, 1), lowerCamelCase : List[str]=512, lowerCamelCase : Optional[Any]=0, lowerCamelCase : Optional[int]=1, lowerCamelCase : int=2, lowerCamelCase : int=504, **lowerCamelCase : str, ):
'''simple docstring'''
super().__init__(**lowerCamelCase, pad_token_id=lowerCamelCase, bos_token_id=lowerCamelCase, eos_token_id=lowerCamelCase )
lowercase__ = hidden_size
lowercase__ = feat_extract_norm
lowercase__ = feat_extract_activation
lowercase__ = list(lowerCamelCase )
lowercase__ = list(lowerCamelCase )
lowercase__ = list(lowerCamelCase )
lowercase__ = conv_bias
lowercase__ = num_conv_pos_embeddings
lowercase__ = num_conv_pos_embedding_groups
lowercase__ = len(self.conv_dim )
lowercase__ = num_hidden_layers
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = num_attention_heads
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = feat_proj_dropout
lowercase__ = final_dropout
lowercase__ = layerdrop
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
lowercase__ = vocab_size
lowercase__ = num_clusters
lowercase__ = do_stable_layer_norm
lowercase__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ = apply_spec_augment
lowercase__ = mask_time_prob
lowercase__ = mask_time_length
lowercase__ = mask_time_min_masks
lowercase__ = mask_feature_prob
lowercase__ = mask_feature_length
lowercase__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase__ = num_codevectors_per_group
lowercase__ = num_codevector_groups
lowercase__ = contrastive_logits_temperature
lowercase__ = feat_quantizer_dropout
lowercase__ = num_negatives
lowercase__ = codevector_dim
lowercase__ = proj_codevector_dim
lowercase__ = diversity_loss_weight
# ctc loss
lowercase__ = ctc_loss_reduction
lowercase__ = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase__ = list(lowerCamelCase )
lowercase__ = list(lowerCamelCase )
lowercase__ = list(lowerCamelCase )
lowercase__ = xvector_output_dim
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul, self.conv_stride, 1 )
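# Minimal sketch of what the property above computes, assuming the default
# conv_stride=(5, 2, 2, 2, 2, 2, 2): the overall downsampling factor of the
# convolutional feature extractor, i.e. one output frame per 320 input samples
# (roughly 20 ms of audio at 16 kHz).
if __name__ == "__main__":
    print(functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1))  # -> 320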
| 671 |
from __future__ import annotations
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
if resistor <= 0:
lowercase__ = F"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(lowerCamelCase_ )
first_sum += 1 / float(lowerCamelCase_ )
index += 1
return 1 / first_sum
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase__ = F"""Resistor at index {index} has a negative value!"""
raise ValueError(lowerCamelCase_ )
index += 1
return sum_r
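# Worked example (hedged; both helpers above share the placeholder name `a`,
# so only the series version stays bound to that name at module level):
#   parallel: 1 / (1/2 + 1/4 + 1/4) = 1 / 1.0 = 1.0 ohm
#   series:   2 + 4 + 4 = 10.0 ohm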
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 1 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
A__ : Tuple = logging.get_logger(__name__)
A__ : Tuple = r'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
@add_start_docstrings(lowerCamelCase )
def __call__( self : Optional[Any], lowerCamelCase : torch.LongTensor, lowerCamelCase : torch.FloatTensor, **lowerCamelCase : Any ):
'''simple docstring'''
raise NotImplementedError('''StoppingCriteria needs to be subclassed''' )
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[int] = None ):
'''simple docstring'''
lowercase__ = max_length
lowercase__ = max_position_embeddings
@add_start_docstrings(lowerCamelCase )
def __call__( self : Optional[Any], lowerCamelCase : torch.LongTensor, lowerCamelCase : torch.FloatTensor, **lowerCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = input_ids.shape[-1]
lowercase__ = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
'''exceptions, performance degradation, or nothing at all.''' )
return is_done
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : int, lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
warnings.warn(
'''The class `MaxNewTokensCriteria` is deprecated. '''
F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
'''with `max_length = start_length + max_new_tokens` instead.''', lowerCamelCase, )
lowercase__ = start_length
lowercase__ = max_new_tokens
lowercase__ = start_length + max_new_tokens
@add_start_docstrings(lowerCamelCase )
def __call__( self : Dict, lowerCamelCase : torch.LongTensor, lowerCamelCase : torch.FloatTensor, **lowerCamelCase : Optional[int] ):
'''simple docstring'''
return input_ids.shape[-1] >= self.max_length
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Dict, lowerCamelCase : float, lowerCamelCase : Optional[float] = None ):
'''simple docstring'''
lowercase__ = max_time
lowercase__ = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(lowerCamelCase )
def __call__( self : Optional[Any], lowerCamelCase : torch.LongTensor, lowerCamelCase : torch.FloatTensor, **lowerCamelCase : List[Any] ):
'''simple docstring'''
return time.time() - self.initial_timestamp > self.max_time
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
@add_start_docstrings(lowerCamelCase )
def __call__( self : Dict, lowerCamelCase : torch.LongTensor, lowerCamelCase : torch.FloatTensor, **lowerCamelCase : str ):
'''simple docstring'''
return any(criteria(lowerCamelCase, lowerCamelCase ) for criteria in self )
@property
def lowercase__ ( self : str ):
'''simple docstring'''
for stopping_criterium in self:
if isinstance(lowerCamelCase, lowerCamelCase ):
return stopping_criterium.max_length
elif isinstance(lowerCamelCase, lowerCamelCase ):
return stopping_criterium.max_length
return None
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = stopping_criteria.max_length
lowercase__ = deepcopy(lowerCamelCase_ )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''' , lowerCamelCase_ )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=lowerCamelCase_ ) )
return new_stopping_criteria
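# Usage sketch (hedged): in the public transformers API the classes above
# correspond to StoppingCriteriaList, MaxLengthCriteria and MaxTimeCriteria,
# and a combined list is passed to `generate`, e.g.:
#
#     from transformers import StoppingCriteriaList, MaxLengthCriteria
#     criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#     output_ids = model.generate(input_ids, stopping_criteria=criteria)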
| 671 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
lowercase__ = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('''RGB''' )
lowercase__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
lowercase__ = transform(lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ )
return image
def a ( lowerCamelCase_ ):
'''simple docstring'''
if "visual_encoder" in key:
lowercase__ = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , lowerCamelCase_ )
if "blocks" in key:
lowercase__ = re.sub(r'''blocks''' , '''layers''' , lowerCamelCase_ )
if "attn" in key:
lowercase__ = re.sub(r'''attn''' , '''self_attn''' , lowerCamelCase_ )
if "norm1" in key:
lowercase__ = re.sub(r'''norm1''' , '''layer_norm1''' , lowerCamelCase_ )
if "norm2" in key:
lowercase__ = re.sub(r'''norm2''' , '''layer_norm2''' , lowerCamelCase_ )
if "encoder.norm" in key:
lowercase__ = re.sub(r'''encoder.norm''' , '''post_layernorm''' , lowerCamelCase_ )
if "encoder.patch_embed.proj" in key:
lowercase__ = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , lowerCamelCase_ )
if "encoder.pos_embed" in key:
lowercase__ = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , lowerCamelCase_ )
if "encoder.cls_token" in key:
lowercase__ = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , lowerCamelCase_ )
if "self_attn" in key:
lowercase__ = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , lowerCamelCase_ )
return key
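# Example of the rewriting above (hedged illustration, traced by hand):
#   'visual_encoder.blocks.0.attn.qkv.weight'
#       -> 'vision_model.encoder.layers.0.self_attn.qkv.weight'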
@torch.no_grad()
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
if config_path is not None:
lowercase__ = BlipConfig.from_pretrained(lowerCamelCase_ )
else:
lowercase__ = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
lowercase__ = BlipForConditionalGeneration(lowerCamelCase_ ).eval()
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
lowercase__ = blip_decoder(pretrained=lowerCamelCase_ , image_size=384 , vit='''base''' )
lowercase__ = pt_model.eval()
lowercase__ = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
hf_model.load_state_dict(lowerCamelCase_ )
lowercase__ = 384
lowercase__ = load_demo_image(image_size=lowerCamelCase_ , device='''cpu''' )
lowercase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase__ = tokenizer(['''a picture of'''] ).input_ids
lowercase__ = hf_model.generate(lowerCamelCase_ , lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowercase__ = hf_model.generate(lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCamelCase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase__ = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
lowercase__ = blip_vqa(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
vqa_model.eval()
lowercase__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForQuestionAnswering(lowerCamelCase_ )
hf_vqa_model.load_state_dict(lowerCamelCase_ )
lowercase__ = ['''How many dogs are in this image?''']
lowercase__ = tokenizer(lowerCamelCase_ , return_tensors='''pt''' ).input_ids
lowercase__ = hf_vqa_model.generate(lowerCamelCase_ , lowerCamelCase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
lowercase__ = blip_itm(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
itm_model.eval()
lowercase__ = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForImageTextRetrieval(lowerCamelCase_ )
lowercase__ = ['''A picture of a woman with a dog sitting in a beach''']
lowercase__ = tokenizer(
lowerCamelCase_ , return_tensors='''pt''' , padding='''max_length''' , truncation=lowerCamelCase_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(lowerCamelCase_ )
hf_itm_model.eval()
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
A__ : List[Any] = parser.parse_args()
convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 671 | 1 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 671 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Any, lowerCamelCase : Tuple=7, lowerCamelCase : str=3, lowerCamelCase : Tuple=18, lowerCamelCase : int=30, lowerCamelCase : Tuple=400, lowerCamelCase : Any=True, lowerCamelCase : Any=None, lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=None, ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''shortest_edge''': 20}
lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
def lowercase__ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
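# The dict above drives the two-step preprocessing that the shape checks below
# rely on: resize so the shortest edge is 20 px, then center-crop to 18 x 18.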
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''crop_size''' ) )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Any ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
| 671 | 1 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
A__ : List[Any] = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
A__ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def a ( ):
'''simple docstring'''
lowercase__ = '''https://pypi.org/pypi/diffusers/json'''
lowercase__ = json.loads(request.urlopen(lowerCamelCase_ ).read() )['''releases'''].keys()
return sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : version.Version(lowerCamelCase_ ) )
def a ( ):
'''simple docstring'''
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCamelCase_ )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
lowercase__ = Path(lowerCamelCase_ ) / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def a ( lowerCamelCase_ ):
'''simple docstring'''
init_hf_modules()
lowercase__ = Path(lowerCamelCase_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
lowercase__ = dynamic_module_path / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def a ( lowerCamelCase_ ):
'''simple docstring'''
with open(lowerCamelCase_ , '''r''' , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
# Imports of the form `import .xxx`
lowercase__ = re.findall(r'''^\s*import\s+\.(\S+)\s*$''' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r'''^\s*from\s+\.(\S+)\s+import''' , lowerCamelCase_ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase_ ) )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = False
lowercase__ = [module_file]
lowercase__ = []
# Let's recurse through all relative imports
while not no_change:
lowercase__ = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCamelCase_ ) )
lowercase__ = Path(lowerCamelCase_ ).parent
lowercase__ = [str(module_path / m ) for m in new_imports]
lowercase__ = [f for f in new_import_files if f not in all_relative_imports]
lowercase__ = [F"""{f}.py""" for f in new_import_files]
lowercase__ = len(lowerCamelCase_ ) == 0
all_relative_imports.extend(lowerCamelCase_ )
return all_relative_imports
def a ( lowerCamelCase_ ):
'''simple docstring'''
with open(lowerCamelCase_ , '''r''' , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
# Imports of the form `import xxx`
lowercase__ = re.findall(r'''^\s*import\s+(\S+)\s*$''' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall(r'''^\s*from\s+(\S+)\s+import''' , lowerCamelCase_ , flags=re.MULTILINE )
# Only keep the top-level module
lowercase__ = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
# Unique-ify and test we got them all
lowercase__ = list(set(lowerCamelCase_ ) )
lowercase__ = []
for imp in imports:
try:
importlib.import_module(lowerCamelCase_ )
except ImportError:
missing_packages.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
raise ImportError(
'''This modeling file requires the following packages that were not found in your environment: '''
F"""{', '.join(lowerCamelCase_ )}. Run `pip install {' '.join(lowerCamelCase_ )}`""" )
return get_relative_imports(lowerCamelCase_ )
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = module_path.replace(os.path.sep , '''.''' )
lowercase__ = importlib.import_module(lowerCamelCase_ )
if class_name is None:
return find_pipeline_class(lowerCamelCase_ )
return getattr(lowerCamelCase_ , lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
from ..pipelines import DiffusionPipeline
lowercase__ = dict(inspect.getmembers(lowerCamelCase_ , inspect.isclass ) )
lowercase__ = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowerCamelCase_ )
and cls.__module__.split('''.''' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
F""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
F""" {loaded_module}.""" )
lowercase__ = cls
return pipeline_class
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = False , ):
'''simple docstring'''
lowercase__ = str(lowerCamelCase_ )
lowercase__ = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ):
lowercase__ = module_file_or_url
lowercase__ = '''local'''
elif pretrained_model_name_or_path.count('''/''' ) == 0:
lowercase__ = get_diffusers_versions()
# cut ".dev0"
lowercase__ = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
# retrieve github version that matches
if revision is None:
lowercase__ = latest_version if latest_version[1:] in available_versions else '''main'''
logger.info(F"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
lowercase__ = F"""v{revision}"""
elif revision == "main":
lowercase__ = revision
else:
raise ValueError(
F"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
F""" {', '.join(available_versions + ['main'] )}.""" )
# community pipeline on GitHub
lowercase__ = COMMUNITY_PIPELINES_URL.format(revision=lowerCamelCase_ , pipeline=lowerCamelCase_ )
try:
lowercase__ = cached_download(
lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
lowercase__ = '''git'''
lowercase__ = pretrained_model_name_or_path + '''.py'''
except EnvironmentError:
logger.error(F"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
lowercase__ = hf_hub_download(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
lowercase__ = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
except EnvironmentError:
logger.error(F"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
lowercase__ = check_imports(lowerCamelCase_ )
# Now we move the module inside our cached dynamic modules.
lowercase__ = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCamelCase_ )
lowercase__ = Path(lowerCamelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
lowercase__ = F"""{module_needed}.py"""
shutil.copy(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowercase__ = use_auth_token
elif use_auth_token is True:
lowercase__ = HfFolder.get_token()
else:
lowercase__ = None
lowercase__ = model_info(lowerCamelCase_ , revision=lowerCamelCase_ , token=lowerCamelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
lowercase__ = submodule_path / commit_hash
lowercase__ = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCamelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCamelCase_ , F"""{module_needed}.py""" , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return os.path.join(lowerCamelCase_ , lowerCamelCase_ )
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = False , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = False , **lowerCamelCase_ , ):
'''simple docstring'''
lowercase__ = get_cached_module_file(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return get_class_in_module(lowerCamelCase_ , final_module.replace('''.py''' , '''''' ) )
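# Usage sketch (hedged): this machinery backs diffusers' community pipelines;
# in the public API it is usually reached via the `custom_pipeline` argument
# rather than called directly, e.g.:
#
#     from diffusers import DiffusionPipeline
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", custom_pipeline="lpw_stable_diffusion"
#     )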
| 671 |
import argparse
import os
import re
A__ : Optional[int] = 'src/transformers'
# Pattern that looks at the indentation in a line.
A__ : Union[str, Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":` and puts `key` in group 0.
A__ : List[str] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A__ : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
A__ : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A__ : Tuple = re.compile(r'\[([^\]]+)\]')
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _re_indent.search(lowerCamelCase_ )
return "" if search is None else search.groups()[0]
def a ( lowerCamelCase_ , lowerCamelCase_="" , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase_ ):
index += 1
lowercase__ = ['''\n'''.join(lines[:index] )]
else:
lowercase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ = [lines[index]]
index += 1
while index < len(lowerCamelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCamelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCamelCase_ ) )
if index < len(lowerCamelCase_ ) - 1:
lowercase__ = [lines[index + 1]]
index += 1
else:
lowercase__ = []
else:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
lowercase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase_ ) > 0:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase_ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def a ( lowerCamelCase_ ):
'''simple docstring'''
def _inner(lowerCamelCase_ ):
return key(lowerCamelCase_ ).lower().replace('''_''' , '''''' )
return _inner
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
def noop(lowerCamelCase_ ):
return x
if key is None:
lowercase__ = noop
# Constants are all uppercase, they go first.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ )[0].isupper() and not key(lowerCamelCase_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase__ = [obj for obj in objects if not key(lowerCamelCase_ )[0].isupper()]
lowercase__ = ignore_underscore(lowerCamelCase_ )
return sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ )
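# Example of the ordering rule above (hedged, traced by hand): constants first,
# then classes, then functions, each group sorted case-insensitively while
# ignoring underscores:
#   sort_objects(["foo", "Bar", "BAZ", "_qux"]) -> ["BAZ", "Bar", "foo", "_qux"]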
def a ( lowerCamelCase_ ):
'''simple docstring'''
# This inner function sort imports between [ ].
def _replace(lowerCamelCase_ ):
lowercase__ = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] ) + "]"
lowercase__ = import_statement.split('''\n''' )
if len(lowerCamelCase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase__ = 2 if lines[1].strip() == '''[''' else 1
lowercase__ = [(i, _re_strip_line.search(lowerCamelCase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowercase__ = sort_objects(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )
lowercase__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCamelCase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase__ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
lowercase__ = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] )
return "\n".join(lowerCamelCase_ )
else:
# Finally we have to deal with imports fitting on one line
lowercase__ = _re_bracket_content.sub(_replace , lowerCamelCase_ )
return import_statement
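# One-line example (hedged, traced by hand):
#   sort_objects_in_import('_import_structure["models.albert"].extend(["B", "A"])')
#       -> '_import_structure["models.albert"].extend(["A", "B"])'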
def a ( lowerCamelCase_ , lowerCamelCase_=True ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase__ = split_code_in_indented_blocks(
lowerCamelCase_ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCamelCase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowercase__ = main_blocks[block_idx]
lowercase__ = block.split('''\n''' )
# Get to the start of the imports.
lowercase__ = 0
while line_idx < len(lowerCamelCase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowercase__ = len(lowerCamelCase_ )
else:
line_idx += 1
if line_idx >= len(lowerCamelCase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowercase__ = '''\n'''.join(block_lines[line_idx:-1] )
lowercase__ = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
lowercase__ = split_code_in_indented_blocks(lowerCamelCase_ , indent_level=lowerCamelCase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase__ = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowercase__ = [(pattern.search(lowerCamelCase_ ).groups()[0] if pattern.search(lowerCamelCase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase__ = [(i, key) for i, key in enumerate(lowerCamelCase_ ) if key is not None]
lowercase__ = [x[0] for x in sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowercase__ = 0
lowercase__ = []
for i in range(len(lowerCamelCase_ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
lowercase__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(lowerCamelCase_ )
count += 1
# And we put our main block back together with its first and last line.
lowercase__ = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCamelCase_ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCamelCase_ ) )
def a ( lowerCamelCase_=True ):
'''simple docstring'''
lowercase__ = []
for root, _, files in os.walk(lowerCamelCase_ ):
if "__init__.py" in files:
lowercase__ = sort_imports(os.path.join(lowerCamelCase_ , '''__init__.py''' ) , check_only=lowerCamelCase_ )
if result:
lowercase__ = [os.path.join(lowerCamelCase_ , '''__init__.py''' )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(F"""Would overwrite {len(lowerCamelCase_ )} files, run `make style`.""" )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
A__ : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 671 | 1 |
from __future__ import annotations
from typing import Any
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Tuple, lowerCamelCase : int = 6 ):
'''simple docstring'''
lowercase__ = None
lowercase__ = None
self.create_linked_list(lowerCamelCase )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = Node()
lowercase__ = current_node
lowercase__ = current_node
lowercase__ = current_node
for _ in range(1, lowerCamelCase ):
lowercase__ = Node()
lowercase__ = current_node
lowercase__ = previous_node
lowercase__ = current_node
lowercase__ = self.front
lowercase__ = previous_node
def lowercase__ ( self : int ):
'''simple docstring'''
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
self.check_can_perform_operation()
return self.front.data if self.front else None
def lowercase__ ( self : Dict, lowerCamelCase : Any ):
'''simple docstring'''
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
lowercase__ = self.rear.next
if self.rear:
lowercase__ = data
def lowercase__ ( self : Tuple ):
'''simple docstring'''
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
lowercase__ = self.front.data
lowercase__ = None
return data
lowercase__ = self.front
lowercase__ = old_front.next
lowercase__ = old_front.data
lowercase__ = None
return data
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
if self.is_empty():
raise Exception('''Empty Queue''' )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
if self.rear and self.rear.next == self.front:
raise Exception('''Full Queue''' )
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = None
lowercase__ = None
lowercase__ = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 |
from math import sqrt
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
lowercase__ = True
# 0 and 1 are none primes.
if number <= 1:
lowercase__ = False
for divisor in range(2 , int(round(sqrt(lowerCamelCase_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowercase__ = False
break
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'status' must been from type bool"
return status
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowercase__ = list(range(2 , n + 1 ) )
lowercase__ = [] # this list will be returned.
# actual sieve of Eratosthenes
for i in range(len(lowerCamelCase_ ) ):
for j in range(i + 1 , len(lowerCamelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase__ = 0
# filters actual prime numbers.
lowercase__ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
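# Worked example: sieve_er(10) crosses out 4, 6, 8, 9 and 10, returning
# [2, 3, 5, 7].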
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
lowercase__ = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCamelCase_ ):
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and number >= 0, "'number' must been an int and >= 0"
lowercase__ = [] # this list will be returned by the function.
# potential prime number factors.
lowercase__ = 2
lowercase__ = number
if number == 0 or number == 1:
ans.append(lowerCamelCase_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCamelCase_ ):
while quotient != 1:
if is_prime(lowerCamelCase_ ) and (quotient % factor == 0):
ans.append(lowerCamelCase_ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
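# Intended behavior (hedged, since the placeholder names above obscure which
# value is appended): prime_factorization(100) -> [2, 2, 5, 5].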
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = max(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = min(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 == 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 != 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (number > 2) and is_even(lowerCamelCase_ )
), "'number' must been an int, even and > 2"
lowercase__ = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
lowercase__ = get_prime_numbers(lowerCamelCase_ )
lowercase__ = len(lowerCamelCase_ )
# run variable for while-loops.
lowercase__ = 0
lowercase__ = None
# exit variable. for break up the loops
lowercase__ = True
while i < len_pn and loop:
lowercase__ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase__ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (len(lowerCamelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
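# Worked example: goldbach(26) scans prime pairs in increasing order; no prime
# p gives 2 + p = 26, but 3 + 23 = 26, so it returns [3, 23].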
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 0
while numbera != 0:
lowercase__ = numbera % numbera
lowercase__ = numbera
lowercase__ = rest
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
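# Worked example (Euclid's algorithm): gcd(24, 36) -> 12.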
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = prime_factorization(lowerCamelCase_ )
elif numbera == 1 or numbera == 1:
lowercase__ = []
lowercase__ = []
lowercase__ = max(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = 0
lowercase__ = 0
lowercase__ = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(max(lowerCamelCase_ , lowerCamelCase_ ) ):
ans *= n
else:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'number' must been a positive int"
lowercase__ = 0
lowercase__ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCamelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and is_prime(
lowerCamelCase_ ), "'ans' must been a prime number and from type int"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
is_prime(lowerCamelCase_ ) and is_prime(lowerCamelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase__ = p_number_a + 1 # jump to the next number
lowercase__ = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCamelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 1), "'n' must been int and >= 1"
lowercase__ = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase_ )
# precondition
assert ans[0] == 1 and ans[len(lowerCamelCase_ ) - 1] == n, "Error in function getDivisors(...)"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase__ = get_divisors(lowerCamelCase_ )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase__ = gcd(abs(lowerCamelCase_ ) , abs(lowerCamelCase_ ) )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
lowercase__ = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
lowercase__ = 0
lowercase__ = 1
lowercase__ = 1 # this will be return
for _ in range(n - 1 ):
lowercase__ = ans
ans += fiba
lowercase__ = tmp
return ans
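# Intended behavior (hedged): the loop implements the Fibonacci recurrence, so
# fib(2) -> 1, fib(3) -> 2, fib(5) -> 5.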
| 671 | 1 |