code (string, length 86 to 54.5k) | code_codestyle (int64, 0 to 371) | style_context (string, length 87 to 49.2k) | style_context_codestyle (int64, 0 to 349) | label (int64, 0 to 1)
---|---|---|---|---
from typing import Dict, List, Optional, Union

import numpy as np

from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy


logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """Base feature extractor for sequence inputs, providing padding and truncation utilities."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        """Pad (and optionally truncate) a batch of sequences up to `max_length` or the longest sequence."""
        # If we have a list of dicts, let's convert it to a dict of lists
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
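
A minimal usage sketch for the `pad` method above. It assumes the `transformers` package is installed; `Wav2Vec2FeatureExtractor` is one concrete subclass of this base class, chosen here purely for illustration:

# A minimal sketch (assumes `transformers` is installed; Wav2Vec2FeatureExtractor
# is one concrete SequenceFeatureExtractor subclass, used here for illustration).
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
# Two raw sequences of different lengths; `pad` right-pads the shorter one with 0.0
# and returns numpy arrays because return_tensors="np".
batch = extractor.pad(
    {"input_values": [[0.1, 0.2, 0.3], [0.4]]},
    padding="longest",
    return_tensors="np",
)
print(batch["input_values"].shape)  # (2, 3)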
| 279 |
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 73 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCAmelCase = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 54 |

from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
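
A short usage sketch for the pipeline above, assuming `diffusers` is installed and the Hub is reachable; `microsoft/vq-diffusion-ithq` is the published VQ-Diffusion checkpoint:

# A minimal sketch (assumes `diffusers` is installed and the
# "microsoft/vq-diffusion-ithq" checkpoint can be downloaded from the Hub).
import torch
from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# `truncation_rate` feeds the `truncate` method defined above.
image = pipe("teddy bear playing in the pool", truncation_rate=1.0).images[0]
image.save("teddy_bear.png")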
| 54 | 1 |
"""simple docstring"""
def a__ ( __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
__lowerCAmelCase: Any = 1
__lowerCAmelCase: List[Any] = 2
while i * i <= n:
__lowerCAmelCase: List[Any] = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def a__ ( ) -> Optional[int]:
__lowerCAmelCase: Union[str, Any] = 1
__lowerCAmelCase: Tuple = 1
while True:
i += 1
t_num += i
if count_divisors(__SCREAMING_SNAKE_CASE ) > 5_0_0:
break
return t_num
if __name__ == "__main__":
print(solution())
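
A couple of quick sanity checks one might run alongside the snippet (28 has exactly six divisors: 1, 2, 4, 7, 14, 28; 76576500 is the known Project Euler 12 answer):

# Spot checks for count_divisors / solution.
assert count_divisors(28) == 6            # divisors: 1, 2, 4, 7, 14, 28
assert count_divisors(76576500) > 500     # first triangular number with >500 divisors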
| 217 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__A = None
__A = logging.get_logger(__name__)
__A = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
__A = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
__A = {
"google/rembert": 256,
}
__A = "▁"
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Tuple = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Dict = RemBertTokenizer
def __init__( self : Tuple , UpperCamelCase__ : Dict=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : int="[CLS]" , UpperCamelCase__ : Optional[Any]="[SEP]" , UpperCamelCase__ : List[str]="<unk>" , UpperCamelCase__ : Dict="[SEP]" , UpperCamelCase__ : int="<pad>" , UpperCamelCase__ : Any="[CLS]" , UpperCamelCase__ : str="[MASK]" , **UpperCamelCase__ : Optional[Any] , )-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: int = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else mask_token
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , )
__lowerCAmelCase: Optional[int] = do_lower_case
__lowerCAmelCase: int = remove_space
__lowerCAmelCase: int = keep_accents
__lowerCAmelCase: str = vocab_file
__lowerCAmelCase: Tuple = False if not self.vocab_file else True
def lowercase_ ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None)-> List[int]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = [self.sep_token_id]
__lowerCAmelCase: Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False)-> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model.")
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase__)) + [1] + ([0] * len(UpperCamelCase__)) + [1]
return [1] + ([0] * len(UpperCamelCase__)) + [1]
def lowercase_ ( self : Tuple , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None)-> List[int]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = [self.sep_token_id]
__lowerCAmelCase: Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None)-> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__):
logger.error("Vocabulary path ({}) should be a directory".format(UpperCamelCase__))
return
__lowerCAmelCase: Optional[Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase__):
copyfile(self.vocab_file , UpperCamelCase__)
return (out_vocab_file,)
| 217 | 1 |
"""simple docstring"""
from __future__ import annotations
def _A (__a ) -> list[int]: # This function is recursive
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(__a )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
SCREAMING_SNAKE_CASE_ : List[str] = array[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : Tuple = [element for element in array[i:] if element >= array[i]]
SCREAMING_SNAKE_CASE_ : str = longest_subsequence(__a )
if len(__a ) > len(__a ):
SCREAMING_SNAKE_CASE_ : Optional[int] = temp_array
else:
i += 1
SCREAMING_SNAKE_CASE_ : Dict = [element for element in array[1:] if element >= pivot]
SCREAMING_SNAKE_CASE_ : List[str] = [pivot, *longest_subsequence(__a )]
if len(__a ) > len(__a ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
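
A small, hand-traced check of the recursive routine above: for [1, 3, 2, 4] it returns the increasing subsequence [1, 2, 4] (ties in length may resolve to other valid answers on other inputs):

# Hand-traced sanity checks for longest_subsequence.
assert longest_subsequence([1, 3, 2, 4]) == [1, 2, 4]
assert longest_subsequence([5]) == [5]  # a single element is its own answer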
| 318 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def _A (__a ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def _A (__a ) -> np.ndarray:
"""simple docstring"""
return (gray > 1_27) & (gray <= 2_55)
def _A (__a , __a ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.zeros_like(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
SCREAMING_SNAKE_CASE_ : Any = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
UpperCAmelCase_ : Dict = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
UpperCAmelCase_ : List[Any] = np.array(Image.open(lena_path))
# kernel to be applied
UpperCAmelCase_ : Any = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
UpperCAmelCase_ : Tuple = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
UpperCAmelCase_ : List[str] = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
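
A tiny illustrative sketch of `dilation` on a synthetic image; the inputs are made up for demonstration, and the printout is meant to be inspected rather than asserted:

# A minimal sketch: dilate a 1-pixel blob with a 3x3 cross kernel.
# (Illustrative only; run it to see how the foreground grows.)
import numpy as np

tiny = np.zeros((5, 5), dtype=int)
tiny[2, 2] = 1
cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
print(dilation(tiny, cross))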
| 318 | 1 |
def sum_of_digits(n: int) -> int:
    """Iterative digit sum."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive digit sum."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Digit sum via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations on ints of increasing length."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
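
Two spot checks for the digit-sum variants above (9 + 0 + 4 + 5 = 18; `abs()` makes negative inputs safe):

assert sum_of_digits(9045) == 18
assert sum_of_digits_recursion(-9045) == 18
assert sum_of_digits_compact(0) == 0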
| 124 |

import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. "
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared. "
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )

    args = parser.parse_args()

    main(args)
| 77 | 0 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 353 |
def solution(n: int = 4_000_000) -> int:
    """Return the sum of all even Fibonacci numbers that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
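
Spot checks: below 100 the even Fibonacci terms are 2, 8, 34, and for the default four-million limit the sum is the well-known Project Euler 2 result:

assert solution(100) == 44        # 2 + 8 + 34
assert solution() == 4613732      # known Project Euler #2 answer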
| 211 | 0 |
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 259 |
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)


@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding


@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. contronet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
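
A minimal initialization sketch under stated assumptions: it presumes jax/flax and the surrounding diffusers modules are importable, and uses a deliberately tiny, hypothetical configuration (these shapes are illustrative, not a published checkpoint; the default config is Stable-Diffusion-sized):

# A tiny, hypothetical configuration purely for illustration.
import jax

controlnet = FlaxControlNetModel(
    sample_size=8,
    block_out_channels=(32, 64),
    down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    cross_attention_dim=32,
    layers_per_block=1,
)
params = controlnet.init_weights(rng=jax.random.PRNGKey(0))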
| 209 | 0 |
"""simple docstring"""
from string import ascii_uppercase
UpperCAmelCase__ = {char: i for i, char in enumerate(ascii_uppercase)}
UpperCAmelCase__ = dict(enumerate(ascii_uppercase))
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : str ) -> str:
_snake_case = len(__lowerCamelCase )
_snake_case = 0
while True:
if x == i:
_snake_case = 0
if len(__lowerCamelCase ) == len(__lowerCamelCase ):
break
key += key[i]
i += 1
return key
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : str ) -> str:
_snake_case = ''''''
_snake_case = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
_snake_case = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : str ) -> str:
_snake_case = ''''''
_snake_case = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
_snake_case = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def _UpperCAmelCase ( ) -> None:
_snake_case = '''THE GERMAN ATTACK'''
_snake_case = '''SECRET'''
_snake_case = generate_key(__lowerCamelCase , __lowerCamelCase )
_snake_case = cipher_text(__lowerCamelCase , __lowerCamelCase )
print(f'''Encrypted Text = {s}''' )
print(f'''Original Text = {original_text(__lowerCamelCase , __lowerCamelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
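
Because encryption subtracts the key letter mod 26 and decryption adds it back, the round trip is the identity on letters (spaces pass through untouched); a quick check:

message = "ATTACK AT DAWN"
key = generate_key(message, "LEMON")
assert original_text(cipher_text(message, key), key) == message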
| 40 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs,):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,)
        # `normalizers` comes from the `tokenizers` package imported at the top of the file
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
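
# Editor's sketch (not part of the original module): how the pair-input helpers
# above compose. It assumes network access to the "bert-base-uncased" checkpoint.
if __name__ == "__main__":
    tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
    ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hello"))
    ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("world"))
    # [CLS] a [SEP] b [SEP], plus the matching 0/1 segment ids
    print(tokenizer.build_inputs_with_special_tokens(ids_a, ids_b))
    print(tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b))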
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
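
# Editor's note: `debug_launcher` runs the given function in spawned CPU-only
# processes, which makes distributed bugs reproducible without GPUs. A hedged
# sketch with an explicit process count (`num_processes` is a keyword of
# `debug_launcher` in recent accelerate releases):
#
#     debug_launcher(test_script.main, num_processes=2)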
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
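
# Editor's sketch (not part of the original file): the fairseq alignment above
# shifts every sentencepiece id up by `fairseq_offset` and reserves the low ids
# for special tokens. With a hypothetical local model file:
#
#     tok = XGLMTokenizer("sentencepiece.bpe.model")
#     tok._convert_token_to_id(",")  # spm piece 3 -> fairseq id 3 + offset = 4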
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references,)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
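
# Editor's sketch (not in the original example): the manual last-batch truncation
# above can be replaced by `Accelerator.gather_for_metrics`, which drops the
# duplicated samples of the final distributed batch automatically.
def evaluate_with_gather_for_metrics(accelerator, model, eval_dataloader, metric):
    model.eval()
    for batch in eval_dataloader:
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # gather_for_metrics truncates the duplicated samples for us
        predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()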
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",)
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the P-Series 1, 1/2^p, 1/3^p, ..., 1/n^p as strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
nth_term = int(input("Enter the last number (nth term) of the P-Series"))
power = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
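# Editor's check: p_series(5, 2) yields ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'],
# i.e. the first five terms of the P-series for p = 2.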
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
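
# Editor's note: `_LazyModule` replaces this module in `sys.modules`, so the
# torch-backed classes listed in `_import_structure` are only imported on first
# attribute access, e.g.:
#
#     from transformers.models.clipseg import CLIPSegProcessor  # cheap
#     from transformers.models.clipseg import CLIPSegModel      # pulls in torch code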
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """A dummy stand-in so the module imports without PIL."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection")
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection")
        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png", candidate_labels=["cat", "remote", "couch"], threshold=0.64,)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
        outputs = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"],)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
        outputs = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def test_large_model_tf(self):
        pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], threshold=threshold,)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], top_k=top_k,)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels,)

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),)

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4],)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(self, parent, out_indices=None, out_features=None, stage_names=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, use_pretrained_backbone=True, is_training=True,):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone,)

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14),)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def lowerCamelCase ( self : Optional[int] ):
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def lowerCamelCase ( self : Dict ):
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def lowerCamelCase ( self : List[Any] ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowerCamelCase ( self : Dict ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowerCamelCase ( self : Any ):
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def lowerCamelCase ( self : Tuple ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowerCamelCase ( self : str ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowerCamelCase ( self : Any ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowerCamelCase ( self : List[str] ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowerCamelCase ( self : Tuple ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowerCamelCase ( self : Optional[int] ):
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def lowerCamelCase ( self : Dict ):
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def lowerCamelCase ( self : int ):
pass
@unittest.skip("Safetensors is not supported by timm." )
def lowerCamelCase ( self : Union[str, Any] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCamelCase ( self : Union[str, Any] ):
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
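
# Editor's check: for a=5, b=6, c=1 the discriminant is 6 * 6 - 4 * 5 * 1 = 16,
# so the roots are (-6 + 4) / 10 = -0.2 and (-6 - 4) / 10 = -1.0.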
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(self, include_temb=True, include_res_hidden_states_tuple=False, include_encoder_hidden_states=False, include_skip_sample=False,):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}
        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)
        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)
        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(device)
        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)
        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict)
        if isinstance(output, Tuple):
            output = output[0]
        self.assertEqual(output.shape, self.output_shape)
        output_slice = output[0, -1, -3:, -3:]
        expected_slice_tensor = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice_tensor, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)
        if isinstance(output, Tuple):
            output = output[0]
        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
    def _compute(self, predictions, references, char_order: int = CHRF.CHAR_ORDER, word_order: int = CHRF.WORD_ORDER, beta: int = CHRF.BETA, lowercase: bool = False, whitespace: bool = False, eps_smoothing: bool = False,):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
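# Editor's note (added, not in the original file): a short sketch of the reference
# transposition done in `_compute` above. sacrebleu's `corpus_score` wants one list
# per reference *stream* across the whole corpus, not one reference list per prediction:
#
#     references = [["ref for sent 1"], ["ref for sent 2"]]   # one list per prediction
#     transposed = [[refs[0] for refs in references]]
#     # -> [["ref for sent 1", "ref for sent 2"]]             # one list per stream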
| 368 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    """BARTpho tokenizer backed by SentencePiece plus a reduced monolingual (fairseq) vocab."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs as <s> A </s> (or <s> A </s></s> B </s> for pairs)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """BARTpho does not use token type ids; return zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the reduced fairseq vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the reduced fairseq vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of subword tokens back into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the SentencePiece model and the monolingual dict to `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
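# Editor's note: a minimal usage sketch (added, not in the original file). It assumes
# the `vinai/bartpho-syllable` checkpoint referenced in PRETRAINED_VOCAB_FILES_MAP is
# reachable from the Hub or a local cache:
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
#     ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))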
| 288 | 0 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name: list, arrival_time: list, burst_time: list, no_of_process: int) -> list:
    """Calculate turnaround times under Highest Response Ratio Next (HRRN) scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Marks finished processes: 1 means finished, 0 means still pending.
    finished_process = [0] * no_of_process
    # List to hold the calculated turnaround times
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Advance to the first unfinished process
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]

            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time
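# Editor's note (added): HRRN picks the ready process with the highest response
# ratio R = (waiting_time + burst_time) / burst_time, which is what the inner loop
# above computes. A quick worked example: a job that has waited 6 time units with a
# burst of 3 has R = (6 + 3) / 3 = 3.0, so it outranks a just-arrived job, whose
# ratio is exactly 1.0 regardless of its burst.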
def calculate_waiting_time(process_name: list, turn_around_time: list, burst_time: list, no_of_process: int) -> list:
    """Waiting time is turnaround time minus burst time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
F"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
F"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(F"""average waiting time : {mean(waiting_time):.5f}""")
print(F"""average turn around time : {mean(turn_around_time):.5f}""")
| 37 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        # Only check that loading the checkpoint and a forward pass do not crash.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 126 | 0 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from the top-left to the bottom-right corner of
    `grid`, moving in the four cardinal directions and never stepping on a 1.

    >>> depth_first_search([[0, 0], [0, 0]], 0, 0, set())
    2
    >>> depth_first_search([[0, 1], [0, 0]], 0, 0, set())
    1
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
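# Editor's note (added): the visit.add / visit.remove pair above is classic
# backtracking -- the shared set only ever holds the cells on the *current* path,
# so the worst-case running time is exponential in the number of free cells.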
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they are out of order for `direction`
    (1 sorts ascending, 0 descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence of `length` elements starting at `low`."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort `length` elements of `array` starting at `low`; `length` should be a power of two."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
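# Editor's note (added): the classic bitonic network above assumes the input length
# is a power of two; int(length / 2) silently truncates otherwise, so correctness is
# only guaranteed for inputs like [3, 1, 4, 1, 5, 9, 2, 6] (length 8), not for a
# 6-element list.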
| 20 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
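# Editor's note (added): with the lazy-module pattern above, importing the package
# is nearly free; the heavy torch/TF submodules are only imported when an attribute
# is first touched, e.g.:
#
#     from transformers.models.convbert import ConvBertModel  # triggers the real import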
| 255 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log and save metrics.

    Args:
        split: one of train, val, test
        metrics: metrics dict
        output_dir: where to save the metrics
    """
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )
# use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)
if model_args.freeze_embeds:
        freeze_embeds(model)
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
# Training
if training_args.do_train:
logger.info('*** Train ***' )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)
if training_args.do_predict:
logger.info('*** Predict ***' )
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))
if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
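# Editor's note: a hedged invocation sketch (added, not in the original; it assumes
# this file is saved as finetune_trainer.py and that the flag names come from the
# dataclasses above; paths are placeholders):
#
#     python finetune_trainer.py \
#         --model_name_or_path t5-small \
#         --data_dir ./my_dataset --output_dir ./out \
#         --do_train --do_eval --task summarization --predict_with_generate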
| 255 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
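# Editor's note: a small usage sketch (added; values are illustrative):
#
#     config = FalconConfig(num_kv_heads=8)
#     assert config.head_dim == 4544 // 71  # hidden_size // num_attention_heads == 64
#     assert config.rotary == (not config.alibi)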
| 125 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def strabool(v):
    """Parse a truthy/falsy CLI string into a bool."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """Rename one consistency-model ResBlock's keys to diffusers ResnetBlock2D names."""
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
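# Editor's note (added): the remapping above renames OpenAI consistency-model
# checkpoint keys to diffusers ResnetBlock2D names, e.g.
# "<old>.in_layers.0.weight" -> "<new>.norm1.weight" and
# "<old>.out_layers.3.bias" -> "<new>.conv2.bias". The diffusers-side key names
# are reconstructed from the upstream conversion script and should be verified
# against the installed diffusers version.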
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    """Split the fused qkv projection and rename attention keys to diffusers names."""
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    """Convert a consistency-model .pt state dict into a diffusers UNet2DModel state dict."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
print(f"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
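    # Editor's note: a hedged follow-up sketch (added) -- reloading and sampling from
    # the converted pipeline; the single-step sampling is the point of consistency
    # models, but the exact call is illustrative:
    #
    #     pipe = ConsistencyModelPipeline.from_pretrained(args.dump_path)
    #     image = pipe(num_inference_steps=1).images[0]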
| 125 | 1 |
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert between volume units via cubic metres.

    >>> volume_conversion(4, "cubicmeter", "litre")
    4000
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
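# Editor's note (added): each entry stores a unit-to-cubic-metre factor ("from_")
# and a cubic-metre-to-unit factor ("to"). One inconsistency worth flagging:
# 0.00454 m^3 is an *imperial* gallon, while 264.172 gal/m^3 is the *US* gallon,
# so a round trip gallon -> cubicmeter -> gallon does not return the input
# (1 * 0.00454 * 264.172 ~= 1.199).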
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
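# Editor's note: a minimal usage sketch (added; the dims shown are illustrative,
# the defaults above are the Hopper-medium values):
#
#     config = DecisionTransformerConfig(state_dim=11, act_dim=3)
#     # `config.num_hidden_layers` resolves to `n_layer` via `attribute_map`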
| 92 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 359 |
"""simple docstring"""
from manim import *
class CheckpointLoadingAnim(Scene):
    """Animate streaming a sharded checkpoint into an empty model.

    Editor's note: the original class name was lost to obfuscation; the name above
    is a placeholder, and the `Scene` base / `construct` method follow standard
    manim conventions.
    """

    def construct(self):
lowerCAmelCase__ : List[str] = Rectangle(height=0.5 ,width=0.5 )
lowerCAmelCase__ : List[Any] = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
lowerCAmelCase__ : Tuple = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : str = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : Optional[Any] = VGroup(*lowercase_ ).arrange(lowercase_ ,buff=0 )
lowerCAmelCase__ : Optional[Any] = VGroup(*lowercase_ ).arrange(lowercase_ ,buff=0 )
lowerCAmelCase__ : Optional[int] = VGroup(lowercase_ ,lowercase_ ).arrange(lowercase_ ,buff=0 )
lowerCAmelCase__ : Dict = Text('''CPU''' ,font_size=2_4 )
lowerCAmelCase__ : Optional[int] = Group(lowercase_ ,lowercase_ ).arrange(lowercase_ ,buff=0.5 ,aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
lowerCAmelCase__ : Tuple = [mem.copy() for i in range(4 )]
lowerCAmelCase__ : List[Any] = VGroup(*lowercase_ ).arrange(lowercase_ ,buff=0 )
lowerCAmelCase__ : Any = Text('''GPU''' ,font_size=2_4 )
lowerCAmelCase__ : str = Group(lowercase_ ,lowercase_ ).arrange(lowercase_ ,buff=0.5 ,aligned_edge=lowercase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase_ )
lowerCAmelCase__ : str = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : List[str] = VGroup(*lowercase_ ).arrange(lowercase_ ,buff=0 )
lowerCAmelCase__ : Tuple = Text('''Model''' ,font_size=2_4 )
lowerCAmelCase__ : List[str] = Group(lowercase_ ,lowercase_ ).arrange(lowercase_ ,buff=0.5 ,aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.add(lowercase_ )
lowerCAmelCase__ : Tuple = []
for i, rect in enumerate(lowercase_ ):
rect.set_stroke(lowercase_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
lowerCAmelCase__ : List[Any] = Rectangle(height=0.46 / 4 ,width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ ,opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=lowercase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] ,direction=lowercase_ ,buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] ,direction=lowercase_ ,buff=0.0 )
self.add(lowercase_ )
cpu_targs.append(lowercase_ )
lowerCAmelCase__ : List[str] = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : Optional[int] = VGroup(*lowercase_ ).arrange(lowercase_ ,buff=0 )
lowerCAmelCase__ : Dict = Text('''Loaded Checkpoint''' ,font_size=2_4 )
lowerCAmelCase__ : List[str] = Group(lowercase_ ,lowercase_ ).arrange(lowercase_ ,aligned_edge=lowercase_ ,buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
lowerCAmelCase__ : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase__ : Optional[Any] = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=1_8 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase_ ,lowercase_ )
lowerCAmelCase__ : Tuple = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=1_8 ,)
blue_text.next_to(lowercase_ ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
lowerCAmelCase__ : Tuple = MarkupText(
F'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' ,font_size=2_4 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ ) ,Write(lowercase_ ) )
self.play(Write(lowercase_ ,run_time=1 ) ,Create(lowercase_ ,run_time=1 ) )
lowerCAmelCase__ : int = []
lowerCAmelCase__ : Optional[Any] = []
for i, rect in enumerate(lowercase_ ):
lowerCAmelCase__ : List[str] = fill.copy().set_fill(lowercase_ ,opacity=0.7 )
target.move_to(lowercase_ )
first_animations.append(GrowFromCenter(lowercase_ ,run_time=1 ) )
lowerCAmelCase__ : List[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase_ ,run_time=1.5 ) )
self.play(*lowercase_ )
self.play(*lowercase_ )
self.wait()
| 74 | 0 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    # The attribute name `metas` is preserved because it is referenced as
    # `self.metas` in the tests below.
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        """Check the 1b-lyrics tokenizer against a frozen reference encoding."""
import torch
        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self) -> None:
        """Token ids must match the reference output of the 5b-lyrics checkpoint."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 245 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
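

# Hedged usage sketch (not part of the original file; the checkpoint id is
# illustrative of a BridgeTower checkpoint on the Hub):
#
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   encoding = processor(images=image, text="a caption", return_tensors="pt")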
| 283 | 0 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 81 |
def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ... (Project Euler 12)."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count divisors of n via its prime factorization: if n = p1^a1 * ... * pk^ak,
    the divisor count is (a1 + 1) * ... * (ak + 1)."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        # the remaining n is a prime factor with multiplicity 1
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
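

# Quick sanity check (not in the original file): 28 = 2^2 * 7, so
# count_divisors(28) == (2 + 1) * (1 + 1) == 6, matching its divisors
# 1, 2, 4, 7, 14, 28 — and 28 is the first triangle number with over 5 divisors.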
| 81 | 1 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n in non-decreasing order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(97)
    [97]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 324 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality while ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Replace every input called `name` on a node with `new_name`, recursing into subgraphs."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicate initializers and rewire all nodes to the surviving copy."""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Find byte-identical initializers in an ONNX file, keep one copy of each,
    and save the smaller model next to the original with an `optimized_` prefix."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT: 4 bytes per element
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 or DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)

    return new_model_path
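

# Minimal usage sketch (not part of the original script; the file name is
# illustrative):
#
#   optimized_path = remove_dup_initializers("model.onnx")
#   print("wrote", optimized_path)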
| 324 | 1 |
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
# NOTE: the original class name is not recoverable from this dump; the
# "This is a local test" return values match diffusers' custom-pipeline test
# fixture, so that name is used here.
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin the loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
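

# Hedged usage sketch (not part of the original file; the checkpoint id is
# illustrative of an unconditional UNet + scheduler checkpoint):
#
#   pipeline = CustomLocalPipeline.from_pretrained("google/ddpm-cifar10-32")
#   images, message = pipeline(batch_size=1, num_inference_steps=50)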
| 250 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048,
        num_layers=12, num_heads=12,
        relative_attention_num_buckets=32, relative_attention_max_distance=128,
        dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0,
        dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False,
        pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64,
        num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new",
        layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0,
        initializer_range=1e-10, initializer_factor=1.0, seq_len=4096,
        relative_attention_num_buckets=32, relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None, vision_config=None,
        initializer_factor=1.0, initializer_range=0.02, is_vqa=False,
        tie_word_embeddings=False, is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
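

# Minimal construction sketch (not part of the original file):
#
#   config = Pix2StructConfig()  # default text + vision sub-configs
#   config = Pix2StructConfig.from_text_vision_configs(
#       Pix2StructTextConfig(num_layers=2), Pix2StructVisionConfig(num_hidden_layers=2)
#   )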
| 250 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
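

# Hedged usage sketch (not part of the original file; `image` is assumed to be
# a PIL image loaded elsewhere):
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)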
| 334 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.05, decoder_layerdrop=0.05,
        use_cache=True, is_encoder_decoder=True, activation_function="relu",
        d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=2, scale_embedding=True,
        router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False,
        num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4,
        router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all",
        normalize_router_prob_before_dropping=False, batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
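

# Construction sketch (not part of the original file):
#
#   config = NllbMoeConfig(num_experts=8, expert_capacity=16)  # a small MoE variant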
| 334 | 1 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
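

# For reference (paraphrasing `require_version`'s behavior): a call such as
#   require_version("tokenizers>=0.11.1", "pip install -U tokenizers")
# raises ImportError when the installed version does not satisfy the
# constraint, appending the hint to the error message.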
| 352 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return n together with every left- and right-truncation of its digits."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Cheap pre-filter: the leading and trailing three digits must be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first `count` primes that stay prime under every truncation."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Project Euler 37: sum of the only eleven truncatable primes."""
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F'''{sum(compute_truncated_primes(1_1)) = }''')
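
# Worked example (not in the original file): 3797 is truncatable —
# 797, 97, 7 (truncating from the left) and 379, 37, 3 (from the right)
# are all prime, so 3797 appears in compute_truncated_primes(11).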
| 193 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224, patch_size=4, num_channels=3, embed_dim=96,
        depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7,
        mlp_ratio=4.0, qkv_bias=True,
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1,
        hidden_act="gelu", use_absolute_embeddings=False,
        initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32,
        out_features=None, out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
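

# Construction sketch (not part of the original file):
#
#   config = SwinConfig()                                   # swin-tiny defaults
#   config = SwinConfig(out_features=["stage2", "stage4"])  # backbone usage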
| 132 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the value at `start` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift a decreased key at `index` up toward the root."""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Prim's algorithm: return the edges of a minimum spanning tree."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
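
# Input format for the demo above (illustrative): each edge line is
# "u v weight". For a triangle graph,
#   3
#   0 1 1
#   1 2 2
#   0 2 3
# the algorithm returns the MST edges [(0, 1), (1, 2)].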
| 57 | 0 |
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """Sort a list of integers in place with pigeonhole sort.

    Runs in O(n + range) time and O(range) extra space, so it is only
    sensible when max(array) - min(array) is small relative to len(array).

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
| 17 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 17 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


# NOTE: the original class name is not recoverable from this dump; the
# crop_pct-based resize logic matches transformers' PoolFormer image
# processor, so that name is used here.
class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
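

# Hedged usage sketch (not part of the original file; the checkpoint id is
# illustrative of a PoolFormer checkpoint on the Hub):
#
#   image_processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
#   inputs = image_processor(images=image, return_tensors="pt")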
| 286 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 286 | 1 |
def remove_duplicates(key: str) -> str:
    """Keep only the first occurrence of each alphabetic character in the key."""
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build a substitution alphabet that starts with the keyword."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
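

# Worked example (not in the original file): with the keyword "Marvin",
# create_cipher_map starts the substitution alphabet with the keyword —
# A->M, B->A, C->R, D->V, E->I, F->N — and then continues with unused
# letters of the plain alphabet, skipping any already consumed by the key.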
| 367 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Pair every YOLO label file in `label_dir` with its image in `img_dir`."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """Flip every image and mirror its YOLO boxes along the chosen axis."""
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Create a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 339 | 0 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    """Rename HF Diffusers UNet keys into the original stable-diffusion layout."""
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
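# Illustrative sanity check (dummy one-entry state dict I made up; not part of the
# original script): a Diffusers key like "down_blocks.0.resnets.0.norm1.weight"
# should land at "input_blocks.1.0.in_layers.0.weight" in SD layout.
#   dummy = {"down_blocks.0.resnets.0.norm1.weight": torch.zeros(4)}
#   assert "input_blocks.1.0.in_layers.0.weight" in convert_unet_state_dict(dummy)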
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv weights
    return w.reshape(*w.shape, 1, 1)
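# The VAE attention projections are nn.Linear in Diffusers but 1x1 nn.Conv2d in the
# original stable-diffusion code, so a (C_out, C_in) weight gains two trailing
# singleton dims. Shape-only example (the values are irrelevant):
#   reshape_weight_for_sd(torch.ones(512, 512)).shape == torch.Size([512, 512, 1, 1])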
def convert_vae_state_dict(vae_state_dict):
    """Rename HF Diffusers VAE keys into the original stable-diffusion layout."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_vaa(text_enc_dict):
    """Convert a v2.x HF CLIP text encoder state dict to the OpenCLIP layout."""
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
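# Note: OpenCLIP stores the attention projections as one fused in_proj tensor, so
# each captured [q, k, v] triple is concatenated along dim 0 -- e.g. three
# (1024, 1024) weights become a single (3072, 1024) in_proj_weight
# (the shapes here are chosen purely for illustration).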
def convert_text_enc_state_dict(text_enc_dict):
    """v1.x text encoder keys already match the SD layout; return unchanged."""
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
    args = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")
# Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")
# Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
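# Example invocation (script name and paths are placeholders; adjust to your setup):
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model --checkpoint_path ./model.ckpt --half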
| 37 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Markov chain over a graph of per-node transition probabilities."""

    def __init__(self):
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> Counter:
    """Run the chain for `steps` transitions and count visits per node."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
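# Minimal usage sketch (the transition probabilities below are invented for
# illustration; each node's outgoing probabilities should sum to 1):
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#   print(get_transitions("a", transitions, 5000))  # Counter of visits per node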
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 321 | """simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args, **kwargs):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 321 | 1 |
import os
def solution(filename: str = "input.txt") -> int:
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
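# Tiny worked example (toy matrix, not from the real input file): for
#   [[1, 9, 1],
#    [1, 1, 1],
#    [9, 9, 1]]
# moving right/up/down from the left column to the right column, the cheapest
# path is straight along the middle row, so the minimal sum is 1 + 1 + 1 = 3.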
if __name__ == "__main__":
print(F'''{solution() = }''')
| 325 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    """Masked language modeling prediction pipeline."""
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index
    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )
    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)
    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {
                    "score": v,
                    "token": p,
                    "token_str": self.tokenizer.decode([p]),
                    "sequence": sequence,
                }
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params
    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
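# Typical usage (the model id is just an example; any masked-LM checkpoint works):
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="bert-base-uncased")
#   unmasker("Paris is the [MASK] of France.", top_k=3)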
return outputs | 286 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
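# Load sketch (the checkpoint is taken from the pretrained map above):
#   tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   tokenizer("Hello world")["input_ids"]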
| 302 |
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 302 | 1 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serialize, converting any nested `GenerationConfig` into a plain dict."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 85 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test with `prec` random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for the modular exponentiation
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
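# Spot checks (the test is randomized, but these answers are stable in practice):
#   is_prime_big(97)   # True
#   is_prime_big(91)   # False, since 91 = 7 * 13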
if __name__ == "__main__":
    n = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 48 | 0 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class ParallelBackendConfig:
    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Run `single_map_nested_func` over `iterable`, via a Pool or a joblib backend."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """Context manager that routes `parallel_map` through the given joblib backend."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
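# Usage sketch (assumes the optional `joblibspark` backend is installed):
#   with parallel_backend("spark"):
#       ...  # work dispatched through parallel_map now runs via joblib on Spark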
| 354 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Compute the built-in voltage of a p-n junction in volts."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration"
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration"
        )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
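# Worked example (silicon-like numbers, all concentrations in cm^-3): with
# donor_conc=1e17, acceptor_conc=1e17 and intrinsic_conc=1e10,
# V_bi = kT/q * ln(Nd * Na / ni**2) ≈ 0.0259 * ln(1e14) ≈ 0.833 V at T = 300 K.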
if __name__ == "__main__":
import doctest
doctest.testmod()
| 124 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
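# Construction sketch (my own illustrative call, not from the original file): the
# defaults above reproduce the small S2T architecture, and the kernel-size check
# raises when the two conv settings disagree:
#   Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5,))  # ValueError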
| 91 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"
        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")
        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 91 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height and width the image processor is expected to produce."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = DetaImageProcessor if is_vision_available() else None
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = DetaImageProcessingTester(self )
@property
def UpperCamelCase ( self: Any ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
    def test_batch_feature(self):
"""simple docstring"""
pass
    def test_call_pil(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        """simple docstring"""
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        """simple docstring"""
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 369 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
"""simple docstring"""
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters(self, top_k=None):
        """simple docstring"""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """simple docstring"""
        return super().__call__(images, **kwargs)
    def preprocess(self, image):
        """simple docstring"""
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        """simple docstring"""
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
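# Usage sketch (any image-classification checkpoint from the Hub works; the
# model id below is just an example):
#
#   from transformers import pipeline
#   classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
#   predictions = classifier("cat.jpg", top_k=3)  # list of {"score", "label"} dicts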
| 69 | 0 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(function, starting_point, variable="x", precision=10**-10, multiplicity=1) -> complex:
    """simple docstring"""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError('Could not find root') from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
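# The update above is the modified Newton step for a root of multiplicity m:
#   x_{n+1} = x_n - m * f(x_n) / f'(x_n)
# With m = 1 (the default) this reduces to the classic Newton-Raphson iteration.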
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(F'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}''')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
F'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
F'''{newton_raphson('exp(x) - 1', 10, precision=0.0_05)}''',
)
# Find root of cos(x)
print(F'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
| 39 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main() -> None:
    '''simple docstring'''
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/transformers')
    open_issues = repo.get_issues(state='open')
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
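        # Close issues whose stale reminder from the bot went unanswered for a week;
        # otherwise leave the reminder once 23 days of inactivity have passed.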
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 138 | 0 |
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
if __name__ == "__main__":
# read original image
    img = imread(r'../image_data/lena.jpg')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow('gaussian filter with 3x3 mask', gaussian3x3)
    imshow('gaussian filter with 5x5 mask', gaussian5x5)
waitKey()
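    # Cross-check sketch (assumes scipy is installed; note scipy pads the borders
    # instead of shrinking the output, so only the interiors are comparable):
    #   from scipy import ndimage
    #   reference = ndimage.gaussian_filter(gray.astype(float), sigma=1)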
| 354 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = '''left'''
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", "\"").replace("''", "\"")
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs, ):
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
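# Usage sketch (downloads the SentencePiece model referenced in the map above):
#
#   tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#   ids = tokenizer("Hello world")["input_ids"]
#   text = tokenizer.decode(ids)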
| 42 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
# pytest discovers these hooks by their exact names, so the functions must be
# called pytest_addoption and pytest_terminal_summary.
def pytest_addoption(parser):
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 279 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = ['''layers''', '''blocks''']
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    """simple docstring"""
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f'''{key} -> {new_key}''')
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
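# Weight-tying note: the (vocab_size x d_model) decoder embedding matrix is reused
# as the LM head by wrapping it in a bias-free nn.Linear(d_model, vocab_size).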
def _download(url: str, root: str = ".") -> bytes:
    """simple docstring"""
    # `root` defaults to the working directory because the call site below passes only the URL.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split('''/''')[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f'''{download_target} exists and is not a regular file''')
    if os.path.isfile(download_target):
        model_bytes = open(download_target, '''rb''').read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''')
    with urllib.request.urlopen(url) as source, open(download_target, '''wb''') as output:
        with tqdm(
            total=int(source.info().get('''Content-Length''')), ncols=80, unit='''iB''', unit_scale=True, unit_divisor=1_024) as loop:
            while True:
                buffer = source.read(8_192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, '''rb''').read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''')
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """simple docstring"""
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location='''cpu''')
    dimensions = original_checkpoint['''dims''']
    state_dict = original_checkpoint['''model_state_dict''']
    proj_out_weights = state_dict['''decoder.token_embedding.weight''']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions['''n_vocab'''], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions['''n_mels'''], d_model=dimensions['''n_audio_state'''], max_target_positions=dimensions['''n_text_ctx'''], encoder_layers=dimensions['''n_audio_layer'''], encoder_attention_heads=dimensions['''n_audio_head'''], decoder_layers=dimensions['''n_text_layer'''], decoder_attention_heads=dimensions['''n_text_state'''], max_source_positions=dimensions['''n_audio_ctx'''], )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f''' but all the following weights are missing {missing}''')
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCAmelCase_ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
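# Example invocation (the local script name and output path are placeholders):
#
#   python convert_openai_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny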
| 279 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
"""simple docstring"""
    def __init__(self, dim, num_attention_heads, attention_head_dim, dropout=0.0, cross_attention_dim=None, activation_fn="geglu", num_embeds_ada_norm=None, attention_bias=False, only_cross_attention=False, double_self_attention=False, upcast_attention=False, norm_elementwise_affine=True, norm_type="layer_norm", final_dropout=False, ):
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
                f""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""")
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0
    def set_chunk_feed_forward(self, chunk_size, dim):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, timestep=None, cross_attention_kwargs=None, class_labels=None, ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype)
        else:
            norm_hidden_states = self.norm1(hidden_states)
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs, )
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""")
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)], dim=self._chunk_dim, )
        else:
            ff_output = self.ff(norm_hidden_states)
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
class FeedForward(nn.Module):
    """simple docstring"""

    def __init__(self, dim, dim_out=None, mult=4, dropout=0.0, activation_fn="geglu", final_dropout=False, ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    """simple docstring"""

    def __init__(self, dim_in, dim_out, approximate="none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    """simple docstring"""

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    """simple docstring"""

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    """simple docstring"""

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    """simple docstring"""

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    """simple docstring"""

    def __init__(self, embedding_dim, out_dim, num_groups, act_fn=None, eps=1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
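# Minimal shape-level smoke test for the blocks above (illustrative only):
#
#   ff = FeedForward(dim=64, activation_fn="geglu")
#   out = ff(torch.randn(2, 16, 64))  # -> torch.Size([2, 16, 64])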
| 176 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swinv2'] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 176 | 1 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
"""simple docstring"""
return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    """simple docstring"""
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main() -> None:
    """simple docstring"""
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25)
    names = iris['target_names']
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap='Blues', normalize='true', )
plt.title('Normalized Confusion Matrix - IRIS Dataset' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 28 |
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
lowerCamelCase__ : List[str] = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
lowerCamelCase__ : str = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
lowerCamelCase__ : Union[str, Any] = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute(self, predictions, references, sample_weight=None):
        '''simple docstring'''
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 246 | 0 |
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    '''simple docstring'''

    def __init__(self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs, ):
        """simple docstring"""
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs, )
    def read(self):
        """simple docstring"""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
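# Usage sketch (`datasets` exposes this reader through Dataset.from_generator):
#
#   from datasets import Dataset
#
#   def gen():
#       yield {"text": "hello"}
#
#   ds = Dataset.from_generator(gen)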
| 359 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    '''simple docstring'''

    def __init__(self, value: int) -> None:
        """simple docstring"""
        self.value = value
        self.left = None
        self.right = None
class BinaryTreeNodeSum:
    '''simple docstring'''

    def __init__(self, tree: Node) -> None:
        """simple docstring"""
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        """simple docstring"""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        """simple docstring"""
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
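# Worked example: a root of 10 with children 5 and 15 sums to 10 + 5 + 15 = 30:
#
#   root = Node(10)
#   root.left, root.right = Node(5), Node(15)
#   assert next(iter(BinaryTreeNodeSum(root))) == 30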
| 233 | 0 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
a_ = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 340 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__a = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 145 | 0 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    """simple docstring"""
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="""linear_ramp""", pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    """simple docstring"""
    return max(smallest, min(n, largest))
def clamp_rect(rect: [int], min: [int], max: [int]):
    """simple docstring"""
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )
def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    """simple docstring"""
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    """simple docstring"""
    result = Image.new("""RGB""", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])), (0, 0), )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    """simple docstring"""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    """simple docstring"""
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    '''simple docstring'''

    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350, ):
        super().__init__(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level, )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("""l""")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("""r""")
        if y == 0:
            remove_borders.append("""t""")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("""b""")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders), mode="""L""", )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask)
    @torch.no_grad()
    def __call__(self, prompt, image, num_inference_steps=75, guidance_scale=9.0, noise_level=50, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, callback=None, callback_steps=1, tile_size=128, tile_border=32, original_image_slice=32, ):
        final_image = Image.new("""RGB""", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice, x, y, tile_size, tile_border, image, final_image, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, noise_level=noise_level, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, )
                current_count += 1
                if callback is not None:
                    callback({"""progress""": current_count / total_tile_count, """image""": final_image})
        return final_image
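# Tiling arithmetic used in __call__: a 512x512 input with tile_size=128 is split
# into ceil(512/128) * ceil(512/128) = 16 tiles; each is upscaled 4x and blended
# into the final canvas with a linear-ramp alpha mask of width tile_border * 4.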
def main():
    """simple docstring"""
    # Run a demo
    model_id = """stabilityai/stable-diffusion-x4-upscaler"""
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="""fp16""", torch_dtype=torch.float16)
    pipe = pipe.to("""cuda""")
    image = Image.open("""../../docs/source/imgs/diffusers_library.jpg""")

    def callback(obj):
        print(f'progress: {obj["progress"]:.4f}')
        obj["image"].save("""diffusers_library_progress.jpg""")

    final_image = pipe(image=image, prompt="""Black font, white background, vector""", noise_level=40, callback=callback)
    final_image.save("""diffusers_library.jpg""")


if __name__ == "__main__":
    main()
| 187 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("""You have to specify either images or text.""")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
def __snake_case ( self , *A_ , **A_ ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*A_ , **A_ )
def __snake_case ( self , *A_ , **A_ ) -> Tuple:
return self.tokenizer.decode(*A_ , **A_ )
@property
def __snake_case ( self ) -> str:
lowerCAmelCase = self.tokenizer.model_input_names
lowerCAmelCase = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
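# Hedged usage sketch (illustrative only): how the processor above is typically driven
# from user code. The checkpoint name and image URL are assumptions for demonstration.
#
#   import requests
#   from PIL import Image
#   from transformers import BlipProcessor
#
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")
#   print(list(inputs.keys()))  # pixel_values plus tokenizer fields such as input_ids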
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
snake_case_ = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
snake_case_ = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
snake_case_ = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
snake_case_ = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
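# Hedged usage sketch (illustrative only): with the lazy module registered above, the
# public classes resolve on first attribute access from user code. The checkpoint name
# is an assumption.
#
#   from transformers import Data2VecTextModel
#   model = Data2VecTextModel.from_pretrained("facebook/data2vec-text-base")
#   print(model.config.hidden_size)  # 768 for the base variant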
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def _lowerCAmelCase ( lowercase_ ):
UpperCAmelCase = fname.split(os.path.sep )[-1]
return re.search(R'^(.*)_\d+\.jpg$' , lowercase_ ).groups()[0]
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self :List[str] , lowercase_ :Dict , lowercase_ :List[str]=None , lowercase_ :Optional[Any]=None ) -> Optional[int]:
UpperCAmelCase = file_names
UpperCAmelCase = image_transform
UpperCAmelCase = label_to_id
def __len__( self :Optional[int] ) -> Optional[Any]:
return len(self.file_names )
def __getitem__( self :int , lowercase_ :str ) -> List[str]:
UpperCAmelCase = self.file_names[idx]
UpperCAmelCase = PIL.Image.open(lowercase_ )
UpperCAmelCase = raw_image.convert('RGB' )
if self.image_transform is not None:
UpperCAmelCase = self.image_transform(lowercase_ )
UpperCAmelCase = extract_label(lowercase_ )
if self.label_to_id is not None:
UpperCAmelCase = self.label_to_id[label]
return {"image": image, "label": label}
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
# Initialize accelerator
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config['lr']
UpperCAmelCase = int(config['num_epochs'] )
UpperCAmelCase = int(config['seed'] )
UpperCAmelCase = int(config['batch_size'] )
UpperCAmelCase = config['image_size']
if not isinstance(lowercase_ , (list, tuple) ):
UpperCAmelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
UpperCAmelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
UpperCAmelCase = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
UpperCAmelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
UpperCAmelCase = os.path.split(lowercase_ )[-1].split('.' )[0]
accelerator.init_trackers(lowercase_ , lowercase_ )
# Grab all the image filenames
UpperCAmelCase = [os.path.join(args.data_dir , lowercase_ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
UpperCAmelCase = [extract_label(lowercase_ ) for fname in file_names]
UpperCAmelCase = list(set(lowercase_ ) )
id_to_label.sort()
UpperCAmelCase = {lbl: i for i, lbl in enumerate(lowercase_ )}
# Set the seed before splitting the data.
np.random.seed(lowercase_ )
torch.manual_seed(lowercase_ )
torch.cuda.manual_seed_all(lowercase_ )
# Split our filenames between train and validation
UpperCAmelCase = np.random.permutation(len(lowercase_ ) )
UpperCAmelCase = int(0.8 * len(lowercase_ ) )
UpperCAmelCase = random_perm[:cut]
UpperCAmelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
UpperCAmelCase = Compose([RandomResizedCrop(lowercase_ , scale=(0.5, 1.0) ), ToTensor()] )
UpperCAmelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=lowercase_ , label_to_id=lowercase_ )
# For evaluation, we use a deterministic Resize
UpperCAmelCase = Compose([Resize(lowercase_ ), ToTensor()] )
UpperCAmelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=lowercase_ , label_to_id=lowercase_ )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
UpperCAmelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = create_model('resnet50d' , pretrained=lowercase_ , num_classes=len(lowercase_ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
UpperCAmelCase = False
for param in model.get_classifier().parameters():
UpperCAmelCase = True
# We normalize the batches of images to be a bit faster.
UpperCAmelCase = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
UpperCAmelCase = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
UpperCAmelCase = OneCycleLR(optimizer=lowercase_ , max_lr=lowercase_ , epochs=lowercase_ , steps_per_epoch=len(lowercase_ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
UpperCAmelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
UpperCAmelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
UpperCAmelCase = os.path.splitext(lowercase_ )[0]
if "epoch" in training_difference:
UpperCAmelCase = int(training_difference.replace('epoch_' , '' ) ) + 1
UpperCAmelCase = None
else:
UpperCAmelCase = int(training_difference.replace('step_' , '' ) )
UpperCAmelCase = resume_step // len(lowercase_ )
resume_step -= starting_epoch * len(lowercase_ )
# Now we train the model
for epoch in range(lowercase_ , lowercase_ ):
model.train()
if args.with_tracking:
UpperCAmelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
UpperCAmelCase = accelerator.skip_first_batches(lowercase_ , lowercase_ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
UpperCAmelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch['image'] - mean) / std
UpperCAmelCase = model(lowercase_ )
UpperCAmelCase = torch.nn.functional.cross_entropy(lowercase_ , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(lowercase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , lowercase_ )
accelerator.save_state(lowercase_ )
model.eval()
UpperCAmelCase = 0
UpperCAmelCase = 0
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch['image'] - mean) / std
with torch.no_grad():
UpperCAmelCase = model(lowercase_ )
UpperCAmelCase = outputs.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['label']) )
UpperCAmelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
UpperCAmelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'accuracy': 100 * eval_metric,
'train_loss': total_loss.item() / len(lowercase_ ),
'epoch': epoch,
} , step=lowercase_ , )
if checkpointing_steps == "epoch":
UpperCAmelCase = F"""epoch_{epoch}"""
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , lowercase_ )
accelerator.save_state(lowercase_ )
if args.with_tracking:
accelerator.end_training()
def _lowerCAmelCase ( ):
UpperCAmelCase = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument('--data_dir' , required=lowercase_ , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
'--mixed_precision' , type=lowercase_ , default=lowercase_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=lowercase_ , default=lowercase_ , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=lowercase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=lowercase_ , default=lowercase_ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=lowercase_ , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
training_function(lowercase_ , lowercase_ )
if __name__ == "__main__":
main()
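# Hedged usage sketch (illustrative only): the training script above is normally
# launched through the accelerate CLI; the script file name, data directory, and
# output paths below are assumptions, while the flags mirror the argparse options
# defined in main().
#
#   accelerate launch cv_example.py --data_dir ./images --with_tracking \
#       --checkpointing_steps epoch --output_dir ./checkpoints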
from math import factorial, radians
def sin( angle_in_degrees: float , accuracy: int = 18 , rounded_values_count: int = 10 ) -> float:
    '''simple docstring'''
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
__import__('''doctest''').testmod()
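    # Hedged sanity checks (illustrative only): both angles should round-trip through
    # the truncated Taylor series exactly after rounding.
    print(sin(90))   # expected: 1.0
    print(sin(270))  # expected: -1.0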
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_a : str = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_a : List[str] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
_a : List[Any] = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def __A ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def __A ( self , a__ , a__ , a__=False ):
if return_pvalue:
_lowerCAmelCase : List[Any] = pearsonr(a__ , a__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(a__ , a__ )[0] )}
"""simple docstring"""
from __future__ import annotations
def prime_sieve( limit: int ) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution( ceiling: int = 1000000 ) -> int:
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F'''{solution() = }''')
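    # Hedged sanity check (illustrative only): primes below 30 from the sieve above.
    print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]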
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Dict = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
lowerCAmelCase_ :List[Any] = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(__A ) , torch_builtin(__A ) ) )
self.assertFalse(torch.allclose(gelu_python(__A ) , gelu_new(__A ) ) )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Dict = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
lowerCAmelCase_ :List[str] = get_activation("""gelu""" )
lowerCAmelCase_ :Optional[int] = get_activation("""gelu_10""" )
lowerCAmelCase_ :Tuple = torch_builtin(__A )
lowerCAmelCase_ :Optional[int] = geluaa(__A )
lowerCAmelCase_ :str = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(__A ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(__A ):
get_activation("""bogus""" )
with self.assertRaises(__A ):
get_activation(__A )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :List[Any] = get_activation("""gelu""" )
lowerCAmelCase_ :List[str] = 1
lowerCAmelCase_ :List[Any] = get_activation("""gelu""" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(__A ):
lowerCAmelCase_ :Union[str, Any] = acta.a
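# Hedged usage sketch (illustrative only): the activation registry the tests above
# exercise, guarded on torch availability like the imports at the top of the file.
if is_torch_available():
    _act = get_activation("gelu")
    print(_act(torch.tensor([0.0, 1.0])))  # roughly tensor([0.0000, 0.8413])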
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Union[str, Any]:
if isinstance(__A , __A ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
lowerCAmelCase_ :Tuple = deepcopy(__A )
elif os.path.exists(__A ):
with io.open(__A , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase_ :str = json.load(__A )
else:
try:
lowerCAmelCase_ :Dict = baseaa.urlsafe_baadecode(__A ).decode("""utf-8""" )
lowerCAmelCase_ :int = json.loads(__A )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
lowerCAmelCase_ :Optional[Any] = config
self.set_stage_and_offload()
def __lowerCAmelCase ( self ) -> Tuple:
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
lowerCAmelCase_ :Tuple = self.get_value("""zero_optimization.stage""" , -1 )
# offload
lowerCAmelCase_ :Dict = False
if self.is_zeroa() or self.is_zeroa():
lowerCAmelCase_ :Optional[int] = set(["""cpu""", """nvme"""] )
lowerCAmelCase_ :Union[str, Any] = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
lowerCAmelCase_ :Optional[int] = True
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
lowerCAmelCase_ :str = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Tuple = ds_key_long.split(""".""" )
lowerCAmelCase_ :List[str] = nodes.pop()
for node in nodes:
lowerCAmelCase_ :Tuple = config.get(__A )
if config is None:
return None, ds_key
return config, ds_key
def __lowerCAmelCase ( self , __A , __A=None ) -> Optional[Any]:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.find_config_node(__A )
if config is None:
return default
return config.get(__A , __A )
def __lowerCAmelCase ( self , __A , __A=False ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Union[str, Any] = ds_key_long.split(""".""" )
for node in nodes:
lowerCAmelCase_ :int = config
lowerCAmelCase_ :Any = config.get(__A )
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__A )
def __lowerCAmelCase ( self , __A ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = self.get_value(__A )
return False if value is None else bool(__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
lowerCAmelCase_ :List[str] = self.get_value(__A )
return False if value is None else not bool(__A )
def __lowerCAmelCase ( self ) -> str:
return self._stage == 2
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._stage == 3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._offload
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Optional[int]:
lowerCAmelCase_ :Dict = engine
def __lowerCAmelCase ( self , __A , **__A ) -> str:
# runs backpropagation and handles mixed precision
self.engine.backward(__A , **__A )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> List[str]:
super().__init__(__A , device_placement=__A , scaler=__A )
lowerCAmelCase_ :List[str] = hasattr(self.optimizer , """overflow""" )
def __lowerCAmelCase ( self , __A=None ) -> Optional[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def __lowerCAmelCase ( self ) -> List[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def __lowerCAmelCase ( self ) -> int:
if self.__has_overflow__:
return self.optimizer.overflow
return False
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A ) -> Optional[int]:
super().__init__(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=0.0_0_1 , __A=0 , **__A ) -> List[Any]:
lowerCAmelCase_ :str = params
lowerCAmelCase_ :Any = lr
lowerCAmelCase_ :List[Any] = weight_decay
lowerCAmelCase_ :Any = kwargs
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=None , __A=0 , **__A ) -> List[str]:
lowerCAmelCase_ :Optional[int] = optimizer
lowerCAmelCase_ :int = total_num_steps
lowerCAmelCase_ :List[Any] = warmup_num_steps
lowerCAmelCase_ :int = kwargs
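# Hedged sketch (illustrative only): the dotted-key lookup that the config wrapper's
# find_config_node / get_value methods implement above, reduced to a self-contained
# helper so the traversal is easy to see.
def _demo_get_value(config: dict, ds_key_long: str, default=None):
    *nodes, leaf = ds_key_long.split(".")
    for node in nodes:
        config = config.get(node)  # walk one nesting level per dotted segment
        if config is None:
            return default
    return config.get(leaf, default)

_demo_cfg = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
print(_demo_get_value(_demo_cfg, "zero_optimization.stage"))                 # 3
print(_demo_get_value(_demo_cfg, "zero_optimization.offload_param.device"))  # cpu
print(_demo_get_value(_demo_cfg, "optimizer.params.lr", 1e-3))               # 0.001 (default)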
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__snake_case = logging.get_logger(__name__)
@add_end_docstrings(_a )
class lowercase ( _a ):
"""simple docstring"""
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def lowerCAmelCase__ ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :Tuple = {}
UpperCamelCase__ :List[Any] = {}
if prompt is not None:
UpperCamelCase__ :List[str] = prompt
if generate_kwargs is not None:
UpperCamelCase__ :Optional[Any] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
UpperCamelCase__ :str = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
UpperCamelCase__ :Union[str, Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :Any = load_image(UpperCamelCase_ )
if prompt is not None:
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(
F'''Received an invalid text input, got - {type(UpperCamelCase_ )} - but expected a single string. '''
'''Note also that one single text can be provided for conditional image to text generation.''' )
UpperCamelCase__ :int = self.model.config.model_type
if model_type == "git":
UpperCamelCase__ :Optional[int] = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework )
UpperCamelCase__ :str = self.tokenizer(text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ).input_ids
UpperCamelCase__ :Optional[int] = [self.tokenizer.cls_token_id] + input_ids
UpperCamelCase__ :int = torch.tensor(UpperCamelCase_ ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
UpperCamelCase__ :List[Any] = self.image_processor(images=UpperCamelCase_ , header_text=UpperCamelCase_ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
UpperCamelCase__ :Optional[int] = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework )
UpperCamelCase__ :Optional[int] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework )
model_inputs.update(UpperCamelCase_ )
else:
raise ValueError(F'''Model type {model_type} does not support conditional text generation''' )
else:
UpperCamelCase__ :int = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
UpperCamelCase__ :Optional[Any] = None
return model_inputs
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , UpperCamelCase_ )
and all(x is None for x in model_inputs['''input_ids'''] )
):
UpperCamelCase__ :List[Any] = None
if generate_kwargs is None:
UpperCamelCase__ :int = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
UpperCamelCase__ :Optional[int] = model_inputs.pop(self.model.main_input_name )
UpperCamelCase__ :Optional[Any] = self.model.generate(UpperCamelCase_ , **UpperCamelCase_ , **UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Any = []
for output_ids in model_outputs:
UpperCamelCase__ :List[str] = {
'generated_text': self.tokenizer.decode(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , )
}
records.append(UpperCamelCase_ )
        return records
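# Hedged usage sketch (illustrative only): the class above backs the "image-to-text"
# pipeline task. The model name and image URL are assumptions for demonstration.
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   print(captioner("http://images.cocodataset.org/val2017/000000039769.jpg"))
#   # e.g. [{'generated_text': '...a short caption...'}]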
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : List[str] = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : int = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[Any] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : str = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
lowerCAmelCase : Optional[int] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Optional[Any] = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[Any] = {
"""google/electra-small-generator""": 5_12,
"""google/electra-base-generator""": 5_12,
"""google/electra-large-generator""": 5_12,
"""google/electra-small-discriminator""": 5_12,
"""google/electra-base-discriminator""": 5_12,
"""google/electra-large-discriminator""": 5_12,
}
lowerCAmelCase : Dict = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_INIT_CONFIGURATION
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ElectraTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=True , snake_case__="[UNK]" , snake_case__="[SEP]" , snake_case__="[PAD]" , snake_case__="[CLS]" , snake_case__="[MASK]" , snake_case__=True , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , **snake_case__ , )
_lowerCAmelCase : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , snake_case__ ) != do_lower_case
or normalizer_state.get('strip_accents' , snake_case__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , snake_case__ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Tuple = getattr(snake_case__ , normalizer_state.pop('type' ) )
_lowerCAmelCase : List[Any] = do_lower_case
_lowerCAmelCase : List[Any] = strip_accents
_lowerCAmelCase : Union[str, Any] = tokenize_chinese_chars
_lowerCAmelCase : Optional[int] = normalizer_class(**snake_case__ )
_lowerCAmelCase : List[Any] = do_lower_case
def a ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
_lowerCAmelCase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : Any = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
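# Hedged usage sketch (illustrative only): round-tripping a sentence through the fast
# ELECTRA tokenizer implemented above. The checkpoint name is an assumption.
#
#   from transformers import ElectraTokenizerFast
#   tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   enc = tok("ELECTRA uses replaced token detection.")
#   print(tok.decode(enc["input_ids"]))  # [CLS] electra uses replaced token detection. [SEP]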
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase : Tuple = """src/transformers"""
# Pattern that looks at the indentation in a line.
lowerCAmelCase : str = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase : str = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""")
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : int = _re_indent.search(_A )
return "" if search is None else search.groups()[0]
def lowercase (_A , _A="" , _A=None , _A=None ):
"""simple docstring"""
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_A ):
index += 1
_lowerCAmelCase : Dict = ['\n'.join(lines[:index] )]
else:
_lowerCAmelCase : str = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : List[Any] = [lines[index]]
index += 1
while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_A ) )
if index < len(_A ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Union[str, Any] = []
else:
blocks.append('\n'.join(_A ) )
_lowerCAmelCase : List[str] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_A ) > 0:
blocks.append('\n'.join(_A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_A ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def lowercase (_A ):
"""simple docstring"""
def _inner(_A ):
return key(_A ).lower().replace('_' , '' )
return _inner
def lowercase (_A , _A=None ):
"""simple docstring"""
    def noop(x ):
return x
if key is None:
_lowerCAmelCase : List[Any] = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : List[Any] = [obj for obj in objects if key(_A ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : Tuple = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : List[str] = [obj for obj in objects if not key(_A )[0].isupper()]
_lowerCAmelCase : Dict = ignore_underscore(_A )
return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A )
def lowercase (_A ):
"""simple docstring"""
def _replace(_A ):
_lowerCAmelCase : Dict = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_lowerCAmelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : int = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]"
_lowerCAmelCase : Tuple = import_statement.split('\n' )
if len(_A ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1
_lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        _lowerCAmelCase : Dict = sort_objects(_A , key=lambda x : x[1] )
_lowerCAmelCase : Tuple = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_A ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : Tuple = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : List[str] = keys[:-1]
_lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(_A )] )
return "\n".join(_A )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A )
return import_statement
def lowercase (_A , _A=True ):
"""simple docstring"""
with open(_A , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase : Tuple = split_code_in_indented_blocks(
_A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_A ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCAmelCase : Tuple = main_blocks[block_idx]
_lowerCAmelCase : int = block.split('\n' )
# Get to the start of the imports.
_lowerCAmelCase : Tuple = 0
while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase : Dict = len(_A )
else:
line_idx += 1
if line_idx >= len(_A ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] )
_lowerCAmelCase : Tuple = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
_lowerCAmelCase : List[Any] = split_code_in_indented_blocks(_A , indent_level=_A )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase : Optional[int] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None]
        _lowerCAmelCase : Optional[int] = [x[0] for x in sorted(_A , key=lambda x : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCAmelCase : int = 0
_lowerCAmelCase : Optional[Any] = []
for i in range(len(_A ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(_A )
count += 1
# And we put our main block back together with its first and last line.
_lowerCAmelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(_A ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(_A , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(_A ) )
def lowercase (_A=True ):
"""simple docstring"""
_lowerCAmelCase : int = []
for root, _, files in os.walk(_A ):
if "__init__.py" in files:
_lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A )
if result:
_lowerCAmelCase : Optional[int] = [os.path.join(_A , '__init__.py' )]
if len(_A ) > 0:
raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' )
if __name__ == "__main__":
lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowerCAmelCase : List[str] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
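# Hedged sketch (illustrative only): the ordering rule sort_objects implements above,
# reduced to a standalone sort key - constants first, then classes, then functions,
# each group alphabetized while ignoring underscores.
def _demo_sort_key(name: str):
    group = 0 if name.isupper() else (1 if name[0].isupper() else 2)
    return (group, name.lower().replace("_", ""))

_demo_names = ["load_tool", "AutoModel", "LOGGER_NAME", "pipeline", "BertConfig"]
print(sorted(_demo_names, key=_demo_sort_key))
# ['LOGGER_NAME', 'AutoModel', 'BertConfig', 'load_tool', 'pipeline']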
"""simple docstring"""
import numpy as np
def sigmoid ( vector : np.array ):
    return 1 / (1 + np.exp(-vector ))
def gaussian_error_linear_unit ( vector : np.array ):
    # Sigmoid-based GELU approximation: gelu(x) ~= x * sigmoid(1.702 * x).
    return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
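    # Hedged sanity check (illustrative only): the sigmoid-based GELU approximation
    # evaluated on a small vector.
    print(gaussian_error_linear_unit(np.array([-1.0, 0.0, 1.0])))  # roughly [-0.1542  0.  0.8458]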
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowercase = "\\n Text data.\n Second line of data."
lowercase = "file"
@pytest.fixture(scope='session')
def __UpperCAmelCase ( a_):
snake_case_ = tmp_path_factory.mktemp('data') / (FILE_PATH + '.zstd')
snake_case_ = bytes(a_ , 'utf-8')
with zstd.open(a_ , 'wb') as f:
f.write(a_)
return path
@pytest.fixture
def __UpperCAmelCase ( a_):
with open(os.path.join(tmpfs.local_root_dir , a_) , 'w') as f:
f.write(a_)
return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'])
def __UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ , a_):
snake_case_ = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
snake_case_ = input_paths[compression_format]
snake_case_ = tmp_path / 'cache'
snake_case_ = DownloadConfig(cache_dir=a_ , extract_compressed_file=a_)
snake_case_ = cached_path(a_ , download_config=a_)
with open(a_) as f:
snake_case_ = f.read()
with open(a_) as f:
snake_case_ = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False])
@pytest.mark.parametrize('default_cache_dir' , [True, False])
def __UpperCAmelCase ( a_ , a_ , a_ , a_ , a_):
snake_case_ = 'custom_cache'
snake_case_ = 'custom_extracted_dir'
snake_case_ = tmp_path / 'custom_extracted_path'
if default_extracted:
snake_case_ = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
else:
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , a_)
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(a_))
snake_case_ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
snake_case_ = xz_file
snake_case_ = (
DownloadConfig(extract_compressed_file=a_)
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=a_)
)
snake_case_ = cached_path(a_ , download_config=a_)
assert Path(a_).parent.parts[-2:] == expected
def __UpperCAmelCase ( a_):
# absolute path
snake_case_ = str(Path(a_).resolve())
assert cached_path(a_) == text_file
# relative path
snake_case_ = str(Path(a_).resolve().relative_to(Path(os.getcwd())))
assert cached_path(a_) == text_file
def __UpperCAmelCase ( a_):
# absolute path
snake_case_ = str(tmp_path.resolve() / '__missing_file__.txt')
with pytest.raises(a_):
cached_path(a_)
# relative path
snake_case_ = './__missing_file__.txt'
with pytest.raises(a_):
cached_path(a_)
def __UpperCAmelCase ( a_):
snake_case_ = get_from_cache(f'''tmp://{tmpfs_file}''')
with open(a_) as f:
snake_case_ = f.read()
assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , a_)
def __UpperCAmelCase ( ):
with pytest.raises(a_):
cached_path('https://huggingface.co')
@patch('datasets.config.HF_DATASETS_OFFLINE' , a_)
def __UpperCAmelCase ( a_):
snake_case_ = tmp_path_factory.mktemp('data') / 'file.html'
with pytest.raises(a_):
http_get('https://huggingface.co' , temp_file=a_)
with pytest.raises(a_):
http_head('https://huggingface.co')
@patch('datasets.config.HF_DATASETS_OFFLINE' , a_)
def __UpperCAmelCase ( a_):
snake_case_ = tmp_path_factory.mktemp('data') / 'file.html'
with pytest.raises(a_):
ftp_get('ftp://huggingface.co' , temp_file=a_)
with pytest.raises(a_):
ftp_head('ftp://huggingface.co')
@patch('datasets.config.HF_DATASETS_OFFLINE' , a_)
def __UpperCAmelCase ( a_):
snake_case_ = tmp_path_factory.mktemp('data') / 'file.html'
with pytest.raises(a_):
fsspec_get('s3://huggingface.co' , temp_file=a_)
with pytest.raises(a_):
fsspec_head('s3://huggingface.co')
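# Hedged usage sketch (illustrative only): outside the test suite, cached_path returns
# existing local files unchanged and caches remote URLs under the datasets cache.
import tempfile

with tempfile.TemporaryDirectory() as _tmp_dir:
    _local = os.path.join(_tmp_dir, "note.txt")
    with open(_local, "w") as _f:
        _f.write("hello")
    print(cached_path(_local))  # an existing local path is passed through as-is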
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path, pytorch_dump_folder_path):
    """Copy the weights of an old-structure ProphetNet checkpoint into the current model structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    special_keys = ["key_proj", "value_proj", "query_proj"]
    mapping = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
        attributes = key.split(".")
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
break
if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)
if not is_key_init:
raise ValueError(F'''{key} was not correctly initialized!''' )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
    prophet.save_pretrained(pytorch_dump_folder_path)
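# --- Illustrative sketch (not part of the original script) ---
# The special-key branch above slices a fused in_proj tensor into equal
# query/key/value thirds along dim 0; the slicing scheme in isolation:
def _demo_in_proj_split(hidden_size=4):
    import torch
    in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
    embed_dim = in_proj_weight.shape[0] // 3
    q = in_proj_weight[:embed_dim, :]
    k = in_proj_weight[embed_dim : 2 * embed_dim, :]
    v = in_proj_weight[2 * embed_dim :, :]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)
    return q, k, v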
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 248 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data):
    """Split a fetched sklearn dataset bunch into (features, target)."""
    return (data["data"], data["target"])
def xgboost(features, target, test_features):
    """Fit an XGBoost regressor and return its predictions on the test features."""
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main():
    """Fetch the California housing data, train the regressor, and report errors."""
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
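# --- Illustrative sketch (not part of the original script) ---
# The single train/test split above can also be replaced by k-fold
# cross-validation; the fold count here is an assumption, not something the
# original file specifies.
def cross_validated_mae(data, target, folds=5):
    from sklearn.model_selection import cross_val_score
    scores = cross_val_score(
        XGBRegressor(verbosity=0, random_state=42),
        data,
        target,
        cv=folds,
        scoring="neg_mean_absolute_error",
    )
    # scoring="neg_mean_absolute_error" returns negated MAE values
    return -scores.mean()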
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 248 | 1 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowerCAmelCase(BertTokenizationTest):
'''simple docstring'''
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
    def test_sequence_builders(self):
"""simple docstring"""
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
        ] | 233 | def SCREAMING_SNAKE_CASE_(num) -> str:
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
if num == 0:
return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(snake_case__ ) for e in binary )
return "0b" + "".join(str(snake_case__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class a ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]])
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 351 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 5_1_2,
"bert-large-uncased": 5_1_2,
"bert-base-cased": 5_1_2,
"bert-large-cased": 5_1_2,
"bert-base-multilingual-uncased": 5_1_2,
"bert-base-multilingual-cased": 5_1_2,
"bert-base-chinese": 5_1_2,
"bert-base-german-cased": 5_1_2,
"bert-large-uncased-whole-word-masking": 5_1_2,
"bert-large-cased-whole-word-masking": 5_1_2,
"bert-large-uncased-whole-word-masking-finetuned-squad": 5_1_2,
"bert-large-cased-whole-word-masking-finetuned-squad": 5_1_2,
"bert-base-cased-finetuned-mrpc": 5_1_2,
"bert-base-german-dbmdz-cased": 5_1_2,
"bert-base-german-dbmdz-uncased": 5_1_2,
"TurkuNLP/bert-base-finnish-cased-v1": 5_1_2,
"TurkuNLP/bert-base-finnish-uncased-v1": 5_1_2,
"wietsedv/bert-base-dutch-cased": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class a(PreTrainedTokenizerFast):
    """Fast BERT tokenizer, backed by HuggingFace's tokenizers library."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        # Rebuild the backend normalizer if the requested options differ from the
        # ones stored in the serialized tokenizer.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
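    # Example (illustrative, not from the original file): for sequence lengths
    # 3 and 2, create_token_type_ids_from_sequences marks sentence A (with its
    # [CLS]/[SEP]) as 0 and sentence B (with its [SEP]) as 1:
    #   create_token_type_ids_from_sequences([1, 2, 3], [4, 5])
    #   -> [0, 0, 0, 0, 0, 1, 1, 1]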
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files) | 240 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent), checkpoint_file=Path(xmod_checkpoint_path).name, _name="xmod_base", arch="xmod_base", task="multilingual_masked_lm", data_name_or_path=str(data_dir), bpe="sentencepiece", sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"), src_dict=str(data_dir / "dict.txt"))
xmod.eval() # disable dropout
    print(xmod)
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages)
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)
    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
model.eval()
# Now let's copy all the weights.
# Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight)  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
        # self attention
        self_attn = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias
# self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
# intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
# output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
raise Exception('Something went wRoNg' )
    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 50 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
_UpperCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class lowerCAmelCase(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>",
                 sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
                 mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None,
                 legacy_behaviour=False, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None) -> List[int]:
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source-language setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)))
    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target-language setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)))
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
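    # Example (illustrative, not from the original file): the language codes set
    # above drive the prefix/suffix special tokens, e.g.
    #   tok = lowerCAmelCase.from_pretrained("facebook/nllb-200-distilled-600M",
    #                                        src_lang="eng_Latn", tgt_lang="fra_Latn")
    #   tok("Hello, World!").input_ids  # starts with the eng_Latn code and
    #                                   # ends with </s> (non-legacy behaviour)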
| 50 | 1 |
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowercase ( TensorFormatter[Mapping, """torch.Tensor""", Mapping] ):
'''simple docstring'''
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa: F401 - import torch at initialization
    def _consolidate(self, column):
        import torch
        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column
    def _tensorize(self, value):
        import torch
        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()
        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
    def _recursive_tensorize(self, data_struct):
        import torch
        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)
    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)
    def format_column(self, pa_table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column
    def format_batch(self, pa_table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
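# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of driving the formatter above; it assumes the parent
# TensorFormatter wiring accepts features=None, and the column name is invented.
def _demo_format_batch():
    table = pa.table({"values": [[1, 2], [3, 4]]})
    formatter = lowercase()  # the formatter class defined above
    batch = formatter.format_batch(table)
    return batch["values"]  # expected: a torch.int64 tensor of shape (2, 2)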
| 351 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
            r_send[1].send(value)
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
            temp = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
            value = min(value, temp)
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
            l_send[1].send(value)
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
            temp = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
            value = max(value, temp)
# after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
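# --- Illustrative reference (not part of the original file) ---
# The pipe-based workers implement the same comparisons as this plain
# sequential odd-even transposition sort, which may help when following the
# message traffic above and below:
def _odd_even_transposition_sequential(arr):
    n = len(arr)
    for phase in range(n):
        start = phase % 2  # alternate between even-odd and odd-even index pairs
        for j in range(start, n - 1, 2):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr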
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0])))
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i])))
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            )))
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)
if __name__ == "__main__":
main()
| 335 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10,
                 hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True,
                 hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
    def test_model_common_attributes(self):
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)
                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
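# --- Illustrative sketch (not part of the original tests) ---
# The JIT test above follows a generic JAX pattern: run the jitted function,
# run it again under jax.disable_jit(), and compare the results. A tiny
# self-contained version (requires jax to be installed):
def _jit_parity_demo():
    fn = jax.jit(lambda x: (x * 2.0).sum())
    x = jnp.ones((2, 3))
    with jax.disable_jit():
        eager = fn(x)
    return bool(jnp.allclose(fn(x), eager))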
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4_180, -1.5_051, -3.4_836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __A , atol=1E-4 ) ) | 81 |
"""simple docstring"""
from __future__ import annotations
import math
def _A ( lowercase ):
"""simple docstring"""
if num <= 0:
a =f'''{num}: Invalid input, please enter a positive integer.'''
raise ValueError(lowercase )
a =[True] * (num + 1)
a =[]
a =2
a =int(math.sqrt(lowercase ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(lowercase )
# Set multiples of start be False
for i in range(start * start , num + 1 , lowercase ):
if sieve[i] is True:
a =False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(lowercase )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip()))) | 81 | 1 |
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , _lowerCAmelCase : Dict ):
# we need a list not a string, so do something to change the type
__snake_case : Any = arr.split(""",""" )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Optional[Any] = [int(self.array[0] )] * len(self.array )
__snake_case : Union[str, Any] = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
__snake_case : Optional[int] = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
__snake_case : int = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
lowercase_ = input("please input some numbers:")
lowercase_ = SubArray(whole_array)
lowercase_ = array.solve_sub_array()
print(("the results is:", re))
| 20 | import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowercase_ = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
lowercase_ = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=False ):
'''simple docstring'''
__snake_case , __snake_case : str = create_model(
"""HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__snake_case : Union[str, Any] = {}
__snake_case : List[Any] = R""".*sequential.(\d+).*"""
__snake_case : Union[str, Any] = R""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__snake_case : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# replace sequential layers with list
__snake_case : Optional[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 )
__snake_case : Dict = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.''' )
elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case : str = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
__snake_case : List[Any] = 1 if projecton_layer == 0 else 2
__snake_case : Tuple = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
__snake_case : Optional[int] = value
__snake_case : Any = mixed_qkv.size(0 ) // 3
__snake_case : List[Any] = mixed_qkv[:qkv_dim]
__snake_case : Tuple = mixed_qkv[qkv_dim : qkv_dim * 2]
__snake_case : List[Any] = mixed_qkv[qkv_dim * 2 :]
__snake_case : Any = query_layer
__snake_case : Dict = key_layer
__snake_case : Optional[Any] = value_layer
else:
__snake_case : List[str] = value
return model_state_dict
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False ):
'''simple docstring'''
__snake_case , __snake_case : List[str] = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE )
clap_model.eval()
__snake_case : Tuple = clap_model.state_dict()
__snake_case : Union[str, Any] = rename_state_dict(__SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = ClapConfig()
__snake_case : Tuple = enable_fusion
__snake_case : Any = ClapModel(__SCREAMING_SNAKE_CASE )
# ignore the spectrogram embedding layer
model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
lowercase_ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 20 | 1 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)

| 90 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
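# Illustrative default masks produced by the helper above (hypothetical sizes):
# with config.encoder_layers == 2 and config.encoder_attention_heads == 4, the
# generated head_mask is torch.ones(2, 4), i.e. no attention head is masked
# unless the caller passes an explicit mask.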
class MaMaaaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
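        # Illustrative effect of the clamping below (hypothetical values): with
        # pad_token_id = 1, torch.tensor([[0, 1, 5]]).clamp(2) -> tensor([[2, 2, 5]]),
        # so no pad ids can appear in the middle of the sequence.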
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)


def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en

| 233 | 0 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
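# For context, `check_copies` keys off marker comments of this shape (the class
# names here are illustrative):
#   # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel
# It then checks that the marked class body matches the referenced source after
# applying the `Bert->TestModel` rename, which is what the tests below exercise.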
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        # restore the default source path used by the check_copies utility
        check_copies.TRANSFORMERS_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            expected = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''',
            F'''{long_class_name}LMPredictionHead''',
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
        localized_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"])

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"])

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
        link_unchanged_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"])

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)

| 364 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins

| 303 | 0 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix=""):
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 36 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)
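# How the `feed_forward_proj` parsing above plays out (illustrative values):
#   "relu"       -> dense_act_fn = "relu",     is_gated_act = False
#   "gated-silu" -> dense_act_fn = "silu",     is_gated_act = True
#   "gated-gelu" -> dense_act_fn = "gelu_new", is_gated_act = True  (backwards compatibility)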
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 139 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
                F"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                F"`config.num_conv_layers = {self.num_conv_layers}`.")

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
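# A quick sketch of the convolutional front-end constraint enforced above
# (values are illustrative): this succeeds because len((5, 5)) == 2,
#   cfg = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))
# while num_conv_layers=3 with the same kernel sizes raises the ValueError.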
| 126 |
"""simple docstring"""
from manim import *
class Stage2(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18)
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            F"<span fgcolor='{BLUE}'>●</span> Checkpoint", font_size=18)
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.", font_size=24)
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))
            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()

| 126 | 1 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
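# Sketch of the deprecation handling above (the script name and flag are
# illustrative):
#   old spelling:  python run_benchmark_tf.py --no_cuda
#   new spelling:  python run_benchmark_tf.py --no-cuda
# An old-style "--no_" flag makes argument parsing raise, and the except branch
# rewrites the error message to point at the new "--no-" spelling.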
| 323 |
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
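# A minimal sketch of programmatic use (the weights are illustrative):
#
#   INF = float("inf")
#   graph = [
#       [0, 2, INF],
#       [INF, 0, 1],
#       [4, INF, 0],
#   ]
#   dist, _ = floyd_warshall(graph, 3)
#   # dist[0][2] == 3: the cheapest 0 -> 2 route goes through vertex 1 (2 + 1),
#   # beating the missing direct edge (INF).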
if __name__ == "__main__":
lowerCAmelCase_ = int(input('''Enter number of vertices: '''))
lowerCAmelCase_ = int(input('''Enter number of edges: '''))
lowerCAmelCase_ = [[float('''inf''') for i in range(v)] for j in range(v)]
for i in range(v):
lowerCAmelCase_ = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('''\nEdge ''', i + 1)
lowerCAmelCase_ = int(input('''Enter source:'''))
lowerCAmelCase_ = int(input('''Enter destination:'''))
lowerCAmelCase_ = float(input('''Enter weight:'''))
lowerCAmelCase_ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 279 | 0 |
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams of each other (case- and
    whitespace-insensitive)."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
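# Illustrative behaviour (the kind of cases doctests for this function assert):
#   check_anagrams("Silent", "Listen")                      -> True
#   check_anagrams("This is a string", "Is this a string")  -> True
#   check_anagrams("There", "Their")                        -> False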
if __name__ == "__main__":
from doctest import testmod
testmod()
_A = input("""Enter the first string """).strip()
_A = input("""Enter the second string """).strip()
_A = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
| 360 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
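# A minimal usage sketch (the preprocessing function and params are
# illustrative): indexing applies preprocessing lazily, one item at a time:
#   dataset = PipelineDataset(["hello", "world"], preprocess_fn, {"truncation": True})
#   dataset[0]  # == preprocess_fn("hello", truncation=True)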
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
def _a (self ):
"""simple docstring"""
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
UpperCAmelCase__ : str = next(self.iterator )
UpperCAmelCase__ : Union[str, Any] = self.infer(_lowerCamelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_lowerCamelCase , torch.Tensor ):
UpperCAmelCase__ : List[Any] = processed
else:
UpperCAmelCase__ : List[str] = list(processed.keys() )[0]
UpperCAmelCase__ : List[str] = processed[key]
if isinstance(_lowerCamelCase , _lowerCamelCase ):
UpperCAmelCase__ : Any = len(_lowerCamelCase )
else:
UpperCAmelCase__ : Union[str, Any] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
UpperCAmelCase__ : Optional[int] = observed_batch_size
# Setting internal index to unwrap the batch
UpperCAmelCase__ : List[Any] = processed
UpperCAmelCase__ : Optional[int] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
"""simple docstring"""
super().__init__(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __iter__(self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = iter(self.loader )
UpperCAmelCase__ : List[Any] = None
return self
def _a (self ):
"""simple docstring"""
if self.subiterator is None:
UpperCAmelCase__ : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
UpperCAmelCase__ : List[str] = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start lookig at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
UpperCAmelCase__ : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
UpperCAmelCase__ : List[str] = next(self.subiterator )
return processed
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __iter__(self ):
"""simple docstring"""
UpperCAmelCase__ : str = iter(self.loader )
return self
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
UpperCAmelCase__ : List[Any] = self.loader_batch_item()
UpperCAmelCase__ : Dict = item.pop("""is_last""" )
accumulator.append(_lowerCamelCase )
if is_last:
return accumulator
while not is_last:
UpperCAmelCase__ : List[str] = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_lowerCamelCase , torch.Tensor ):
UpperCAmelCase__ : Dict = processed
else:
UpperCAmelCase__ : List[Any] = list(processed.keys() )[0]
UpperCAmelCase__ : List[Any] = processed[key]
if isinstance(_lowerCamelCase , _lowerCamelCase ):
UpperCAmelCase__ : int = len(_lowerCamelCase )
else:
UpperCAmelCase__ : List[Any] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
UpperCAmelCase__ : str = observed_batch_size
UpperCAmelCase__ : Union[str, Any] = processed
UpperCAmelCase__ : List[Any] = 0
while self._loader_batch_index < self.loader_batch_size:
UpperCAmelCase__ : Union[str, Any] = self.loader_batch_item()
UpperCAmelCase__ : int = item.pop("""is_last""" )
accumulator.append(_lowerCamelCase )
if is_last:
return accumulator
else:
UpperCAmelCase__ : Any = processed
UpperCAmelCase__ : Any = item.pop("""is_last""" )
accumulator.append(_lowerCamelCase )
return accumulator
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = dataset
UpperCAmelCase__ : Union[str, Any] = key
def __len__(self ):
"""simple docstring"""
return len(self.dataset )
def __getitem__(self , _lowerCamelCase ):
"""simple docstring"""
return self.dataset[i][self.key]
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : int = dataset
UpperCAmelCase__ : Any = keya
UpperCAmelCase__ : str = keya
def __len__(self ):
"""simple docstring"""
return len(self.dataset )
def __getitem__(self , _lowerCamelCase ):
"""simple docstring"""
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Project Euler 99: find the line whose base**exponent value is greatest.

    Computing base**exponent directly is infeasible, so compare the values in
    log space: exponent * log10(base) preserves the ordering.
    """
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        base, exponent = list(map(int, line.split(",")))
        if exponent * log10(base) > largest:
            largest = exponent * log10(base)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
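
# Why the log comparison above is valid (a sketch, not part of the original
# solution): log10 is strictly increasing, so a**b > c**d exactly when
# b * log10(a) > d * log10(c). The helper name below is illustrative.
def _compare_powers(base_a: int, exp_a: int, base_b: int, exp_b: int) -> bool:
    """Return True if base_a**exp_a > base_b**exp_b, compared in log space."""
    return exp_a * log10(base_a) > exp_b * log10(base_b)


# e.g. _compare_powers(2, 11, 3, 7) is False, since 3**7 = 2187 > 2**11 = 2048.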
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        # compare against as many reference values as are listed above
        self.assertTrue(np.allclose(input_features[0, 0, : expected.shape[0]], expected, atol=1e-4))
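
# For reference, the per-utterance normalization the checks above assert can be
# written as follows (a sketch; the real extractor also respects padding masks
# when computing the statistics, and the helper name is illustrative):
def _utterance_cmvn(features: np.ndarray) -> np.ndarray:
    """Zero-mean, unit-variance normalization over the time axis."""
    mean = features.mean(axis=0)
    std = np.sqrt(features.var(axis=0) + 1e-10)  # epsilon guards against all-zero frames
    return (features - mean) / std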
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__A = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(__A)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        r"""Instantiate a RagConfig from a question-encoder config and a generator config."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
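
# A minimal construction sketch (the checkpoint names below are illustrative
# placeholders for any question-encoder/generator pairing, not mandated here):
#
#     from transformers import AutoConfig
#
#     question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     generator = AutoConfig.from_pretrained("facebook/bart-large")
#     config = RagConfig.from_question_encoder_generator_configs(
#         question_encoder, generator, n_docs=5
#     )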
def is_contains_unique_chars(input_str: str) -> bool:
    """Return True if every character in ``input_str`` is unique, using a single
    integer as a bitmap over Unicode code points.

    >>> is_contains_unique_chars("I_love.py")
    True
    >>> is_contains_unique_chars("I don't love Python")
    False
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on the bit for the current character's code point
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
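
# Worked example of the bitmap trick above (a sketch): for "aba", the first 'a'
# sets bit 97 and 'b' sets bit 98; the second 'a' finds bit 97 already set, so
# the function returns False without any auxiliary set or dict.
assert is_contains_unique_chars("ab") is True
assert is_contains_unique_chars("aba") is False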
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
lowercase__ :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
lowercase__ :List[str] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
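
# A quick sanity sketch for `rename_key` (illustrative keys, not an exhaustive
# mapping of a real checkpoint):
#
#     rename_key("stem.conv.weight")  # -> "bit.embedder.convolution.weight"
#     rename_key("head.fc.bias")      # -> "classifier.1.bias"
#     rename_key("norm.weight")       # -> "bit.norm.weight"
#     rename_key("stages.0.weight")   # -> "bit.encoder.stages.0.weight"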
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit (Project Euler 72),
    using a prime sieve and the multiplicative form of Euler's product."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"""{solution() = }""")
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}

        # Create dummy source/target files of the expected length for each split.
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
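
# A standalone sketch of the WordPiece behavior asserted above: greedy
# longest-match-first against the vocab, with "##" marking word-internal
# pieces (the vocab below is illustrative, mirroring the test fixture):
_demo_vocab = {token: i for i, token in enumerate(["[UNK]", "un", "##want", "##ed", "runn", "##ing"])}
_demo_wordpiece = WordpieceTokenizer(vocab=_demo_vocab, unk_token="[UNK]")
assert _demo_wordpiece.tokenize("unwanted running") == ["un", "##want", "##ed", "runn", "##ing"]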
"""simple docstring"""
import string
def lowerCamelCase__ ( _lowerCamelCase : str ) -> None:
for key in range(len(string.ascii_uppercase ) ):
lowerCamelCase_ = ''
for symbol in message:
if symbol in string.ascii_uppercase:
lowerCamelCase_ = string.ascii_uppercase.find(_lowerCamelCase )
lowerCamelCase_ = num - key
if num < 0:
lowerCamelCase_ = num + len(string.ascii_uppercase )
lowerCamelCase_ = translated + string.ascii_uppercase[num]
else:
lowerCamelCase_ = translated + symbol
print(F'''Decryption using Key #{key}: {translated}''' )
def lowerCamelCase__ ( ) -> None:
lowerCamelCase_ = input('Encrypted message: ' )
lowerCamelCase_ = message.upper()
decrypt(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
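
# Worked example (a sketch): "KHOOR" is "HELLO" shifted by key 3, so the loop
# above prints "HELLO" on the Key #3 line; e.g. 'K' is index 10, 10 - 3 = 7,
# and string.ascii_uppercase[7] == 'H'.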
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
    "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
    "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
    "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
    "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
    "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.slow_but_exact = slow_but_exact

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
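
# Shape sketch for the BLOOM KV-cache generated above (illustrative sizes:
# batch=2, n_head=4, head_dim=8, past length=3). Keys put head_dim before the
# time axis while values put time first, which is why `inputs` fills the past
# axes with `inverted_values_shape=True`:
#
#     key:   (batch * n_head, head_dim, past_seq) -> (8, 8, 3)
#     value: (batch * n_head, past_seq, head_dim) -> (8, 3, 8)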
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class __a ( UpperCAmelCase ):
_a : Optional[int] = ['pixel_values']
def __init__( self , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = PIL.Image.BICUBIC , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 / 255 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> None:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = size if size is not None else {'height': 256, 'width': 256}
_UpperCAmelCase = get_size_dict(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_UpperCAmelCase = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='crop_size' )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = PIL.Image.BICUBIC , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return resize(
_SCREAMING_SNAKE_CASE , size=(size['height'], size['width']) , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(_SCREAMING_SNAKE_CASE , size=(size['height'], size['width']) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='crop_size' )
_UpperCAmelCase = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
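# A minimal, self-contained sketch of the rescale -> normalize arithmetic that
# `preprocess` above applies to every image; the 1/255 factor, mean, and std
# below are illustrative assumptions, not values read from this class.
import numpy as np

_demo_image = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)
_rescaled = _demo_image * (1 / 255)  # rescale step: map pixel values into [0, 1]
_mean = np.array([0.5, 0.5, 0.5])
_std = np.array([0.5, 0.5, 0.5])
_normalized = (_rescaled - _mean) / _std  # normalize step: per-channel standardization
assert _normalized.shape == _demo_image.shape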
| 185 | 0 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Dict = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[Any] = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : Any = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : Optional[int] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
lowerCAmelCase : Dict = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
lowerCAmelCase : Tuple = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
lowerCAmelCase : Union[str, Any] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
lowerCAmelCase : List[Any] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
lowerCAmelCase : Optional[Any] = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = DPRContextEncoderTokenizer
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = DPRQuestionEncoderTokenizer
lowerCAmelCase : Optional[int] = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
lowerCAmelCase : Optional[Any] = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
lowerCAmelCase : Any = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
          is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(UpperCAmelCase__ )
class __magic_name__ :
'''simple docstring'''
def __call__( self , _a , _a = None , _a = None , _a = False , _a = False , _a = None , _a = None , _a = None , **_a , ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
_a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
elif titles is None or texts is None:
lowerCamelCase = titles if texts is None else texts
return super().__call__(
_a , _a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
lowerCamelCase = titles if not isinstance(_a , _a ) else [titles]
lowerCamelCase = texts if not isinstance(_a , _a ) else [texts]
lowerCamelCase = len(_a )
lowerCamelCase = questions if not isinstance(_a , _a ) else [questions] * n_passages
assert len(_a ) == len(
        _a ), f'There should be as many titles as texts but got {len(_a )} titles and {len(_a )} texts.'
lowerCamelCase = super().__call__(_a , _a , padding=_a , truncation=_a )["""input_ids"""]
lowerCamelCase = super().__call__(_a , add_special_tokens=_a , padding=_a , truncation=_a )["""input_ids"""]
lowerCamelCase = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_a , _a )
]
}
if return_attention_mask is not False:
lowerCamelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCamelCase = attention_mask
return self.pad(_a , padding=_a , max_length=_a , return_tensors=_a )
def _lowerCAmelCase ( self , _a , _a , _a = 16 , _a = 64 , _a = 4 , ):
"""simple docstring"""
lowerCamelCase = reader_input["""input_ids"""]
lowerCamelCase , lowerCamelCase , lowerCamelCase = reader_output[:3]
lowerCamelCase = len(_a )
lowerCamelCase = sorted(range(_a ) , reverse=_a , key=relevance_logits.__getitem__ )
lowerCamelCase = []
for doc_id in sorted_docs:
lowerCamelCase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCamelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCamelCase = sequence_ids.index(self.pad_token_id )
else:
lowerCamelCase = len(_a )
lowerCamelCase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_a , top_spans=_a , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_a , start_index=_a , end_index=_a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_a ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _lowerCAmelCase ( self , _a , _a , _a , _a , ):
"""simple docstring"""
lowerCamelCase = []
for start_index, start_score in enumerate(_a ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        lowerCamelCase = sorted(_a , key=lambda x : x[1] , reverse=_a )
lowerCamelCase = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
lowerCamelCase = end_index - start_index + 1
assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_a ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCAmelCase__ )
class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = ["input_ids", "attention_mask"]
__UpperCamelCase = DPRReaderTokenizer
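# A self-contained sketch of the span-scoring rule implemented in `_get_best_spans`
# above: every (start, end) pair within `max_answer_length` tokens is scored by
# start_logit + end_logit and the pairs are ranked by that sum. The toy logits
# below are made up for illustration.
import numpy as np

_start_logits = np.array([0.1, 2.0, 0.3, 0.0])
_end_logits = np.array([0.0, 0.5, 1.5, 0.2])
_max_answer_length = 2
_scores = sorted(
    (
        ((s, s + l), _start_logits[s] + _end_logits[s + l])
        for s in range(len(_start_logits))
        for l in range(min(_max_answer_length, len(_end_logits) - s))
    ),
    key=lambda pair: pair[1],
    reverse=True,
)
assert _scores[0][0] == (1, 2)  # best span starts at index 1 and ends at index 2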
| 291 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase : List[str] = logging.get_logger(__name__)
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = ["audio_values", "audio_mask"]
def __init__( self , _a=2_048 , _a=1 , _a=[16, 16] , _a=128 , _a=44_100 , _a=86 , _a=2_048 , _a=0.0 , **_a , ):
"""simple docstring"""
super().__init__(
feature_size=_a , sampling_rate=_a , padding_value=_a , **_a , )
lowerCamelCase = spectrogram_length
lowerCamelCase = num_channels
lowerCamelCase = patch_size
lowerCamelCase = feature_size // self.patch_size[1]
lowerCamelCase = n_fft
lowerCamelCase = sampling_rate // hop_length_to_sampling_rate
lowerCamelCase = sampling_rate
lowerCamelCase = padding_value
lowerCamelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_a , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=_a , norm="""slaney""" , mel_scale="""slaney""" , ).T
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = spectrogram(
_a , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
lowerCamelCase = log_spec[:, :-1]
lowerCamelCase = log_spec - 20.0
lowerCamelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , _a , _a = None , _a = True , _a = None , _a = False , _a = False , **_a , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowerCamelCase = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
lowerCamelCase = is_batched_numpy or (
isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_a , np.ndarray ):
lowerCamelCase = np.asarray(_a , dtype=np.floataa )
elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowerCamelCase = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , _a ):
lowerCamelCase = [np.asarray(_a , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowerCamelCase = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowerCamelCase = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowerCamelCase = np.array(_a ).astype(np.floataa )
# convert into correct format for padding
lowerCamelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowerCamelCase = np.ones([len(_a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowerCamelCase = padded_audio_features * self.padding_value
for i in range(len(_a ) ):
lowerCamelCase = audio_features[i]
lowerCamelCase = feature
# return as BatchFeature
if return_attention_mask:
lowerCamelCase = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
lowerCamelCase = {"""audio_values""": padded_audio_features}
lowerCamelCase = BatchFeature(data=_a , tensor_type=_a )
return encoded_inputs
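# A self-contained sketch of the patch-level attention mask built in `__call__`
# above: each spectrogram contributes ceil(frames / patch_height) * freq_len
# "real" patches, padded with zeros up to the longest example. The patch size
# and frame counts below are illustrative assumptions.
from math import ceil

_freq_len, _patch_height = 8, 16
_frame_counts = [100, 37]  # time frames per example in a toy batch
_patches = [ceil(n / _patch_height) * _freq_len for n in _frame_counts]
_max_patches = max(_patches)
_audio_mask = [[1] * p + [0] * (_max_patches - p) for p in _patches]
assert [sum(row) for row in _audio_mask] == _patches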
| 291 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCAmelCase (__UpperCamelCase : str , __UpperCamelCase : str ):
"""simple docstring"""
__UpperCamelCase =list(__UpperCamelCase )
__UpperCamelCase =list(__UpperCamelCase )
__UpperCamelCase =0
for i in range(len(__UpperCamelCase ) ):
if lista[i] != lista[i]:
count += 1
__UpperCamelCase ='''_'''
if count > 1:
return False
else:
return "".join(__UpperCamelCase )
def lowerCAmelCase (__UpperCamelCase : list[str] ):
"""simple docstring"""
__UpperCamelCase =[]
while True:
__UpperCamelCase =['''$'''] * len(__UpperCamelCase )
__UpperCamelCase =[]
for i in range(len(__UpperCamelCase ) ):
for j in range(i + 1 , len(__UpperCamelCase ) ):
__UpperCamelCase =compare_string(binary[i] , binary[j] )
if k is False:
__UpperCamelCase ='''*'''
__UpperCamelCase ='''*'''
temp.append('''X''' )
for i in range(len(__UpperCamelCase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__UpperCamelCase ) == 0:
return pi
__UpperCamelCase =list(set(__UpperCamelCase ) )
def lowerCAmelCase (__UpperCamelCase : int , __UpperCamelCase : Sequence[float] ):
"""simple docstring"""
__UpperCamelCase =[]
for minterm in minterms:
__UpperCamelCase =''''''
for _ in range(__UpperCamelCase ):
__UpperCamelCase =str(minterm % 2 ) + string
minterm //= 2
temp.append(__UpperCamelCase )
return temp
def lowerCAmelCase (__UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : int ):
"""simple docstring"""
__UpperCamelCase =list(__UpperCamelCase )
__UpperCamelCase =list(__UpperCamelCase )
__UpperCamelCase =0
for i in range(len(__UpperCamelCase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def lowerCAmelCase (__UpperCamelCase : list[list[int]] , __UpperCamelCase : list[str] ):
"""simple docstring"""
__UpperCamelCase =[]
__UpperCamelCase =[0] * len(__UpperCamelCase )
for i in range(len(chart[0] ) ):
__UpperCamelCase =0
__UpperCamelCase =-1
for j in range(len(__UpperCamelCase ) ):
if chart[j][i] == 1:
count += 1
__UpperCamelCase =j
if count == 1:
__UpperCamelCase =1
for i in range(len(__UpperCamelCase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__UpperCamelCase ) ):
__UpperCamelCase =0
temp.append(prime_implicants[i] )
while True:
__UpperCamelCase =0
__UpperCamelCase =-1
__UpperCamelCase =0
for i in range(len(__UpperCamelCase ) ):
__UpperCamelCase =chart[i].count(1 )
if count_n > max_n:
__UpperCamelCase =count_n
__UpperCamelCase =i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__UpperCamelCase ) ):
__UpperCamelCase =0
def lowerCAmelCase (__UpperCamelCase : list[str] , __UpperCamelCase : list[str] ):
"""simple docstring"""
__UpperCamelCase =[[0 for x in range(len(__UpperCamelCase ) )] for x in range(len(__UpperCamelCase ) )]
for i in range(len(__UpperCamelCase ) ):
__UpperCamelCase =prime_implicants[i].count('''_''' )
for j in range(len(__UpperCamelCase ) ):
if is_for_table(prime_implicants[i] , binary[j] , __UpperCamelCase ):
__UpperCamelCase =1
return chart
def lowerCAmelCase ():
"""simple docstring"""
__UpperCamelCase =int(input('''Enter the no. of variables\n''' ) )
__UpperCamelCase =[
float(__UpperCamelCase )
for x in input(
'''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
]
__UpperCamelCase =decimal_to_binary(__UpperCamelCase , __UpperCamelCase )
__UpperCamelCase =check(__UpperCamelCase )
print('''Prime Implicants are:''' )
print(__UpperCamelCase )
__UpperCamelCase =prime_implicant_chart(__UpperCamelCase , __UpperCamelCase )
__UpperCamelCase =selection(__UpperCamelCase , __UpperCamelCase )
print('''Essential Prime Implicants are:''' )
print(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
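# A self-contained sketch of what `compare_string` above is meant to compute
# (the name-mangled version compares a list with itself): two minterm strings
# merge iff they differ in at most one position, and the differing bit becomes '_'.
def _compare_string_sketch(string_a: str, string_b: str) -> "str | bool":
    merged = [a if a == b else "_" for a, b in zip(string_a, string_b)]
    return "".join(merged) if merged.count("_") <= 1 else False

assert _compare_string_sketch("0010", "0110") == "0_10"
assert _compare_string_sketch("0010", "0101") is False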
| 85 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class _lowercase :
"""simple docstring"""
def __init__( self : int , UpperCamelCase__ : Any ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase =data
__UpperCamelCase =None
class _lowercase :
"""simple docstring"""
def __init__( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase =None
__UpperCamelCase =None
def __iter__( self : int ) -> Iterator[Any]:
'''simple docstring'''
__UpperCamelCase =self.head
while self.head:
yield node.data
__UpperCamelCase =node.next
if node == self.head:
break
def __len__( self : Union[str, Any] ) -> int:
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self : str ) -> Union[str, Any]:
'''simple docstring'''
return "->".join(str(UpperCamelCase__ ) for item in iter(self ) )
def UpperCAmelCase_ ( self : List[str] , UpperCamelCase__ : Any ) -> None:
'''simple docstring'''
self.insert_nth(len(self ) , UpperCamelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , UpperCamelCase__ : Any ) -> None:
'''simple docstring'''
self.insert_nth(0 , UpperCamelCase__ )
def UpperCAmelCase_ ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Any ) -> None:
'''simple docstring'''
if index < 0 or index > len(self ):
raise IndexError('''list index out of range.''' )
__UpperCamelCase =Node(UpperCamelCase__ )
if self.head is None:
__UpperCamelCase =new_node # first node points itself
__UpperCamelCase =__UpperCamelCase =new_node
elif index == 0: # insert at head
__UpperCamelCase =self.head
__UpperCamelCase =__UpperCamelCase =new_node
else:
__UpperCamelCase =self.head
for _ in range(index - 1 ):
__UpperCamelCase =temp.next
__UpperCamelCase =temp.next
__UpperCamelCase =new_node
if index == len(self ) - 1: # insert at tail
__UpperCamelCase =new_node
def UpperCAmelCase_ ( self : Any ) -> Any:
'''simple docstring'''
return self.delete_nth(0 )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def UpperCAmelCase_ ( self : int , UpperCamelCase__ : int = 0 ) -> Any:
'''simple docstring'''
if not 0 <= index < len(self ):
raise IndexError('''list index out of range.''' )
__UpperCamelCase =self.head
if self.head == self.tail: # just one node
__UpperCamelCase =__UpperCamelCase =None
elif index == 0: # delete head node
__UpperCamelCase =self.tail.next.next
__UpperCamelCase =self.head.next
else:
__UpperCamelCase =self.head
for _ in range(index - 1 ):
__UpperCamelCase =temp.next
__UpperCamelCase =temp.next
__UpperCamelCase =temp.next.next
if index == len(self ) - 1: # delete at tail
__UpperCamelCase =temp
return delete_node.data
def UpperCAmelCase_ ( self : str ) -> bool:
'''simple docstring'''
return len(self ) == 0
def lowerCAmelCase ():
"""simple docstring"""
__UpperCamelCase =CircularLinkedList()
assert len(__UpperCamelCase ) == 0
assert circular_linked_list.is_empty() is True
assert str(__UpperCamelCase ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(__UpperCamelCase ) == i
circular_linked_list.insert_nth(__UpperCamelCase , i + 1 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
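# A self-contained sketch of the circular traversal pattern that `__iter__`
# above relies on: follow `next` pointers and stop once the walk returns to
# the head node.
class _NodeSketch:
    def __init__(self, data):
        self.data, self.next = data, None

_a_node, _b_node, _c_node = _NodeSketch(1), _NodeSketch(2), _NodeSketch(3)
_a_node.next, _b_node.next, _c_node.next = _b_node, _c_node, _a_node  # close the circle
_seen, _node = [], _a_node
while True:
    _seen.append(_node.data)
    _node = _node.next
    if _node is _a_node:  # back at the head: one full lap completed
        break
assert _seen == [1, 2, 3]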
| 85 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
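# A minimal sketch (not the real `_LazyModule`) of the deferred-import idea this
# file relies on: attribute access resolves the owning submodule on first use,
# so importing the package stays cheap when heavy backends are never touched.
import importlib

class _LazyModuleSketch:
    def __init__(self, package, import_structure):
        # map each exported name to the submodule that defines it
        self._package = package
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module(f".{self._name_to_module[attr]}", self._package)
        return getattr(module, attr)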
| 217 |
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class snake_case ( __snake_case ):
# to overwrite at feature extractactor specific tests
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Tuple = None
@property
def lowercase_ ( self : str)-> Any:
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def lowercase_ ( self : Tuple)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(UpperCamelCase__ , "feature_size"))
self.assertTrue(hasattr(UpperCamelCase__ , "sampling_rate"))
self.assertTrue(hasattr(UpperCamelCase__ , "padding_value"))
def lowercase_ ( self : Dict)-> Any:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: str = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: int = feat_extract.model_input_names[0]
__lowerCAmelCase: List[Any] = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(UpperCamelCase__) == len(UpperCamelCase__) for x, y in zip(UpperCamelCase__ , processed_features[input_name])))
__lowerCAmelCase: Tuple = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase__)
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type="np")
__lowerCAmelCase: Optional[int] = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase: Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def lowercase_ ( self : Optional[int])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase__)
__lowerCAmelCase: int = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Any = BatchFeature({input_name: speech_inputs} , tensor_type="pt")
__lowerCAmelCase: str = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase: Any = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def lowercase_ ( self : str)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase__)
__lowerCAmelCase: Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type="tf")
__lowerCAmelCase: int = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase: Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : List[str]=False)-> Optional[int]:
'''simple docstring'''
def _inputs_have_equal_length(UpperCamelCase__ : Tuple):
__lowerCAmelCase: Optional[int] = len(input[0])
for input_slice in input[1:]:
if len(UpperCamelCase__) != length:
return False
return True
def _inputs_are_equal(UpperCamelCase__ : str , UpperCamelCase__ : str):
if len(UpperCamelCase__) != len(UpperCamelCase__):
return False
for input_slice_a, input_slice_a in zip(UpperCamelCase__ , UpperCamelCase__):
if not np.allclose(np.asarray(UpperCamelCase__) , np.asarray(UpperCamelCase__) , atol=1e-3):
return False
return True
__lowerCAmelCase: List[str] = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCamelCase__)
__lowerCAmelCase: Tuple = feat_extract.model_input_names[0]
__lowerCAmelCase: List[Any] = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: Optional[int] = self.feat_extract_tester.seq_length_diff
__lowerCAmelCase: str = self.feat_extract_tester.max_seq_length + pad_diff
__lowerCAmelCase: Tuple = self.feat_extract_tester.min_seq_length
__lowerCAmelCase: Union[str, Any] = self.feat_extract_tester.batch_size
__lowerCAmelCase: Tuple = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__lowerCAmelCase: Optional[int] = feat_extract.pad(UpperCamelCase__ , padding=UpperCamelCase__)
__lowerCAmelCase: Optional[int] = input_a[input_name]
__lowerCAmelCase: List[Any] = feat_extract.pad(UpperCamelCase__ , padding="longest")
__lowerCAmelCase: int = input_a[input_name]
__lowerCAmelCase: Tuple = feat_extract.pad(UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[-1]))
__lowerCAmelCase: Optional[Any] = input_a[input_name]
__lowerCAmelCase: Union[str, Any] = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np")
__lowerCAmelCase: Optional[int] = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , padding="max_length")[input_name]
__lowerCAmelCase: Any = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=UpperCamelCase__ , return_tensors="np")
__lowerCAmelCase: Optional[Any] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_are_equal(UpperCamelCase__ , UpperCamelCase__))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
__lowerCAmelCase: Dict = feat_extract.pad(UpperCamelCase__ , pad_to_multiple_of=1_0)
__lowerCAmelCase: Tuple = input_a[input_name]
__lowerCAmelCase: Tuple = feat_extract.pad(UpperCamelCase__ , padding="longest" , pad_to_multiple_of=1_0)
__lowerCAmelCase: List[str] = input_a[input_name]
__lowerCAmelCase: str = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , pad_to_multiple_of=1_0 , max_length=UpperCamelCase__)
__lowerCAmelCase: str = input_a[input_name]
__lowerCAmelCase: Dict = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , pad_to_multiple_of=1_0 , max_length=UpperCamelCase__ , return_tensors="np" , )
__lowerCAmelCase: List[str] = input_a[input_name]
self.assertTrue(all(len(UpperCamelCase__) % 1_0 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(UpperCamelCase__ , UpperCamelCase__))
__lowerCAmelCase: Optional[Any] = pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0
self.assertTrue(all(len(UpperCamelCase__) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
__lowerCAmelCase: Any = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1e-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1e-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1e-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1e-3)
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : Optional[int]=False)-> str:
'''simple docstring'''
def _inputs_have_equal_length(UpperCamelCase__ : List[Any]):
__lowerCAmelCase: List[str] = len(input[0])
for input_slice in input[1:]:
if len(UpperCamelCase__) != length:
return False
return True
def _inputs_are_equal(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any]):
if len(UpperCamelCase__) != len(UpperCamelCase__):
return False
for input_slice_a, input_slice_a in zip(UpperCamelCase__ , UpperCamelCase__):
if not np.allclose(np.asarray(UpperCamelCase__) , np.asarray(UpperCamelCase__) , atol=1e-3):
return False
return True
__lowerCAmelCase: Dict = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Any = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCamelCase__)
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs})
# truncate to smallest
__lowerCAmelCase: int = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , truncation=UpperCamelCase__)
__lowerCAmelCase: Any = input_a[input_name]
__lowerCAmelCase: str = feat_extract.pad(UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]))
__lowerCAmelCase: Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
# truncate to smallest with np
__lowerCAmelCase: Optional[int] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , return_tensors="np" , truncation=UpperCamelCase__ , )
__lowerCAmelCase: List[str] = input_a[input_name]
__lowerCAmelCase: List[str] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , return_tensors="np")
__lowerCAmelCase: Any = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
        # since truncation makes the padded length smaller than the longest input,
        # the function can't return an `np.ndarray` and has to return a list
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
# truncate to middle
__lowerCAmelCase: Union[str, Any] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[1]) , truncation=UpperCamelCase__ , return_tensors="np" , )
__lowerCAmelCase: int = input_a[input_name]
__lowerCAmelCase: List[Any] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[1]) , truncation=UpperCamelCase__)
__lowerCAmelCase: Dict = input_a[input_name]
__lowerCAmelCase: List[Any] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[1]) , return_tensors="np")
__lowerCAmelCase: str = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_are_equal(UpperCamelCase__ , UpperCamelCase__))
        # since truncation makes the padded length smaller than the longest input,
        # the function can't return an `np.ndarray` and has to return a list
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , truncation=UpperCamelCase__)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , padding="longest" , truncation=UpperCamelCase__)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , padding="longest" , truncation=UpperCamelCase__)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , padding="max_length" , truncation=UpperCamelCase__)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__lowerCAmelCase: Tuple = 1_2
__lowerCAmelCase: Optional[int] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , pad_to_multiple_of=UpperCamelCase__ , truncation=UpperCamelCase__ , )
__lowerCAmelCase: Dict = input_a[input_name]
__lowerCAmelCase: List[str] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , pad_to_multiple_of=UpperCamelCase__ , )
__lowerCAmelCase: str = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__lowerCAmelCase: Optional[Any] = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
__lowerCAmelCase: Optional[Any] = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
def lowercase_ ( self : Union[str, Any])-> Optional[Any]:
'''simple docstring'''
self._check_padding(numpify=UpperCamelCase__)
def lowercase_ ( self : Optional[Any])-> List[str]:
'''simple docstring'''
self._check_padding(numpify=UpperCamelCase__)
def lowercase_ ( self : List[Any])-> Tuple:
'''simple docstring'''
self._check_truncation(numpify=UpperCamelCase__)
def lowercase_ ( self : Optional[Any])-> Optional[Any]:
'''simple docstring'''
self._check_truncation(numpify=UpperCamelCase__)
@require_torch
def lowercase_ ( self : Dict)-> Dict:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: str = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: str = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np")[input_name]
__lowerCAmelCase: Dict = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="pt")[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1e-2)
@require_tf
def lowercase_ ( self : Tuple)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Dict = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Tuple = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: List[Any] = feat_extract.model_input_names[0]
__lowerCAmelCase: Any = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: Dict = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np")[input_name]
__lowerCAmelCase: Any = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="tf")[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1e-2)
def lowercase_ ( self : Optional[int])-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.feat_extract_dict
__lowerCAmelCase: Any = True
__lowerCAmelCase: str = self.feature_extraction_class(**UpperCamelCase__)
__lowerCAmelCase: List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: Tuple = [len(UpperCamelCase__) for x in speech_inputs]
__lowerCAmelCase: Optional[Any] = feat_extract.model_input_names[0]
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: Optional[Any] = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np")
self.assertIn("attention_mask" , UpperCamelCase__)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , UpperCamelCase__)
def lowercase_ ( self : List[str])-> Any:
'''simple docstring'''
__lowerCAmelCase: Any = self.feat_extract_dict
__lowerCAmelCase: str = True
__lowerCAmelCase: int = self.feature_extraction_class(**UpperCamelCase__)
__lowerCAmelCase: int = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: str = [len(UpperCamelCase__) for x in speech_inputs]
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: List[str] = min(UpperCamelCase__)
__lowerCAmelCase: List[str] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="np")
self.assertIn("attention_mask" , UpperCamelCase__)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
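# A self-contained sketch of the property the two attention-mask tests above
# assert: a longest-padded mask has one row per example and each row sums to
# that example's original length. The lengths below are illustrative.
import numpy as np

_lengths = [3, 5, 2]
_max_len = max(_lengths)
_mask = np.array([[1] * n + [0] * (_max_len - n) for n in _lengths])
assert list(_mask.shape) == [len(_lengths), _max_len]
assert _mask.sum(-1).tolist() == _lengths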
| 217 | 1 |
import numpy as np
def __lowerCamelCase ( UpperCamelCase__ ):
    '''
    Apply the logistic sigmoid 1 / (1 + exp(-x)) elementwise to a numpy vector.

    >>> __lowerCamelCase(np.array([0.0]))
    array([0.5])
    '''
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 200 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_UpperCAmelCase : Dict = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
_UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 200 | 1 |
"""simple docstring"""
from math import pi, sqrt
def _A ( UpperCamelCase_ : float) -> float:
'''simple docstring'''
if num <= 0:
raise ValueError("math domain error")
if num > 171.5:
raise OverflowError("math range error")
elif num - int(UpperCamelCase_) not in (0, 0.5):
raise NotImplementedError("num must be an integer or a half-integer")
elif num == 0.5:
return sqrt(UpperCamelCase_)
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def _A ( ) -> None:
'''simple docstring'''
assert gamma(0.5) == sqrt(UpperCamelCase_)
assert gamma(1) == 1.0
assert gamma(2) == 1.0
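# Worked example (a sketch): unrolling the recurrence gamma(x) = (x - 1) * gamma(x - 1)
# down to the gamma(0.5) = sqrt(pi) base case gives
# gamma(3.5) = 2.5 * 1.5 * 0.5 * sqrt(pi) ~= 3.3234; checked here against the
# standard-library gamma rather than the name-mangled function above.
from math import gamma as _stdlib_gamma

assert abs(_stdlib_gamma(3.5) - 2.5 * 1.5 * 0.5 * sqrt(pi)) < 1e-12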
if __name__ == "__main__":
from doctest import testmod
testmod()
_a = 1.0
while num:
_a = float(input('Gamma of: '))
print(F"gamma({num}) = {gamma(num)}")
print('\nEnter 0 to exit...')
| 17 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This check ensures we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _lowerCAmelCase ( self : Optional[Any] ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
            # This check ensures we did call the fake head request
mock_head.assert_called()
def _lowerCAmelCase ( self : Union[str, Any] ):
# This test is for deprecated behavior and can be removed in v5
try:
SCREAMING_SNAKE_CASE =tempfile.mktemp()
with open(snake_case ,'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,snake_case )
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained(snake_case )
finally:
os.remove(snake_case )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' ,'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,snake_case )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def _lowerCAmelCase ( self : int ):
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class a_ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def _lowerCAmelCase ( cls : List[Any] ):
SCREAMING_SNAKE_CASE =TOKEN
HfFolder.save_token(snake_case )
@classmethod
def _lowerCAmelCase ( cls : Tuple ):
try:
delete_repo(token=cls._token ,repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def _lowerCAmelCase ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case ,repo_id='test-tokenizer' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def _lowerCAmelCase ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=True ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
    def test_push_to_hub_dynamic_tokenizer ( self ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,'vocab.txt' )
            with open(vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
            tokenizer = CustomTokenizer(vocab_file )
        # No fast custom tokenizer
        tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=True )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,'vocab.txt' )
            with open(vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir )
            bert_tokenizer.save_pretrained(tmp_dir )
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir )
        tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=True )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizerFast' )
        tokenizer = AutoTokenizer.from_pretrained(
            f'{USER}/test-dynamic-tokenizer' ,use_fast=False ,trust_remote_code=True )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
class TrieTest ( unittest.TestCase ):
    """simple docstring"""
    def test_trie ( self ):
        trie = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
    def test_trie_split ( self ):
        trie = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS]', ' This is a ', 'extra_id_100'] )
    def test_trie_single ( self ):
        trie = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) ,['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) ,['BC', 'A'] )
    def test_trie_final ( self ):
        trie = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
    def test_trie_subtokens ( self ):
        trie = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
    def test_trie_suffix_tokens ( self ):
        trie = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) ,['AB', 'C'] )
    def test_trie_skip ( self ):
        trie = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) ,['ABC', 'D'] )
    def test_cut_text_hardening ( self ):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text('ABC' ,[0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts ,['AB', 'C'] )
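# A minimal usage sketch of the Trie exercised above (behavior inferred from
# the assertions in this test class; `Trie` is imported elsewhere in the module):
#
#     trie = Trie()
#     trie.add("[CLS]")
#     trie.split("[CLS] hello")  # -> ["[CLS]", " hello"]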
| 334 | 0 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats ( url : str = "https://www.worldometers.info/coronavirus/" ):
    '''simple docstring'''
    xpath_str = '''//div[@class = "maincounter-number"]/span/text()'''
    return covid_data(*html.fromstring(requests.get(url ).content ).xpath(xpath_str ) )
fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
 | 240 |
'''simple docstring'''
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news ( bbc_news_api_key : str ):
    '''simple docstring'''
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key ).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
print(F'{i}.) {article["title"]}' )
if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
 | 240 | 1 |
from __future__ import annotations
def longest_subsequence ( array : list[int] ) -> list[int]:  # This function is recursive
    '''simple docstring'''
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
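# Usage sketch (input is illustrative; the result follows from the algorithm
# above): longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) returns
# [10, 22, 33, 41, 60, 80], the longest non-decreasing subsequence.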
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=100 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=249 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=128 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.0006 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=5_0256 , eos_token_id=5_0256 , **kwargs , ) -> Dict:
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
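# Hedged usage sketch: `attribute_map` above routes the standard config names
# onto the GPT-style ones, e.g.
#     config = TrajectoryTransformerConfig(n_embd=64)
#     assert config.hidden_size == 64  # resolved through attribute_map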
| 314 | 1 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests ( ModelTesterMixin , unittest.TestCase ):
    model_class = PriorTransformer
    main_input_name = 'hidden_states'
    @property
    def dummy_input(self ):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def get_dummy_seed_input(self , seed=0 ):
        torch.manual_seed(seed )
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    @property
    def input_shape(self ):
        return (4, 8)
    @property
    def output_shape(self ):
        return (4, 8)
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
    def test_from_pretrained_hub(self ):
        model , loading_info = PriorTransformer.from_pretrained(
            'hf-internal-testing/prior-dummy' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(torch_device )
        hidden_states = model(**self.dummy_input )[0]
        assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature(self ):
        init_dict , _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        signature = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ['hidden_states', 'timestep']
        self.assertListEqual(arg_names[:2] , expected_arg_names )
    def test_output_pretrained(self ):
        model = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
        model = model.to(torch_device )
        if hasattr(model , 'set_default_attn_processor' ):
            model.set_default_attn_processor()
        input_dict = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input_dict )[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice )
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] )
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-2 ) )
@slow
class PriorTransformerIntegrationTests ( unittest.TestCase ):
    def get_dummy_seed_input(self , batch_size=1 , embedding_dim=7_6_8 , num_embeddings=7_7 , seed=0 ):
        torch.manual_seed(seed )
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
[3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
# fmt: on
] )
    def test_kandinsky_prior(self , seed , expected_slice ):
        model = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
        model.to(torch_device )
        input_dict = self.get_dummy_seed_input(seed=seed )
        with torch.no_grad():
            sample = model(**input_dict )[0]
        assert list(sample.shape ) == [1, 7_6_8]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice )
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1E-3 )
| 304 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester ( ConfigTester ):
    def create_and_test_config_common_properties(self ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , 'embed_dim' ) )
        self.parent.assertTrue(hasattr(config , 'num_heads' ) )
class CvtModelTester :
    def __init__(self , parent , batch_size=1_3 , image_size=6_4 , num_channels=3 , embed_dim=[1_6, 4_8, 9_6] , num_heads=[1, 3, 6] , depth=[1, 2, 1_0] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.02 , layer_norm_eps=1E-12 , is_training=True , use_labels=True , num_labels=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , pixel_values , labels ):
        model = CvtModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ):
        self.model_tester = CvtModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config(self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self ):
        return
    @unittest.skip(reason='Cvt does not output attentions' )
    def test_attention_outputs(self ):
        pass
    @unittest.skip(reason='Cvt does not use inputs_embeds' )
    def test_inputs_embeds(self ):
        pass
    @unittest.skip(reason='Cvt does not support input and output embeddings' )
    def test_model_common_attributes(self ):
        pass
    def test_forward_signature(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output(self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small(self ):
        pass
    @slow
    def test_model_from_pretrained(self ):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest ( unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    @slow
    def test_inference_image_classification_head(self ):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 304 | 1 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments( TrainingArguments ):
    '''simple docstring'''
    sortish_sampler: bool = field(default=False , metadata={"help": "Whether to use SortishSampler or not."} )
    predict_with_generate: bool = field(
        default=False , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    generation_max_length: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        } , )
    generation_num_beams: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        } , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        } , )
    def to_dict( self ):
        '''simple docstring'''
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig ):
                d[k] = v.to_dict()
        return d
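# Hedged usage sketch: with the fields restored above, nested config objects
# serialize to plain dicts, e.g.
#     args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True)
#     d = args.to_dict()  # any GenerationConfig value is now a plain dict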
| 64 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case = logging.getLogger(__name__)
def simple_accuracy ( preds , labels ):
    """simple docstring"""
    return (preds == labels).mean()
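# Worked example (illustrative): simple_accuracy(np.array([1, 0, 1]),
# np.array([1, 1, 1])) == 2 / 3, the share of matching predictions.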
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
    data_dir: str = field(metadata={"""help""": """Should contain the data files for the task."""} )
    max_seq_length: int = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_master():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info(' %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
        results.update(result )
    return results
def _mp_fn ( index ):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 310 | 0 |
'''simple docstring'''
def solution (limit : int = 1_0_0_0_0_0_0 ):
    """simple docstring"""
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
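# Worked example (small limit, computed by hand): for limit = 10 the totients
# phi(2..10) are 1, 2, 2, 4, 2, 6, 4, 6, 4, so solution(10) returns 31 — the
# count of reduced proper fractions with denominator <= 10.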
if __name__ == "__main__":
print(f'''{solution() = }''')
| 356 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
    """tokenizer_file""": {
        """mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""mobilebert-uncased""": 5_1_2}

PRETRAINED_INIT_CONFIGURATION = {}

class MobileBertTokenizerFast ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token="[UNK]" ,sep_token="[SEP]" ,pad_token="[PAD]" ,cls_token="[CLS]" ,mask_token="[MASK]" ,tokenize_chinese_chars=True ,strip_accents=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' ,do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' ,strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' ,tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens ( self ,token_ids_0 ,token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences ( self ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary ( self ,save_directory : str ,filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
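# Hedged usage sketch (the repo id is the one referenced in the maps above):
#     tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#     tokenizer("hello world")["input_ids"]  # [CLS] ... [SEP] token ids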
| 5 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
lowerCamelCase_ = logging.getLogger(__name__)
lowerCamelCase_ = {"facebook/bart-base": BartForConditionalGeneration}
lowerCamelCase_ = {"facebook/bart-base": BartTokenizer}
def parse_args() -> List[Any]:
    parser = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
    parser.add_argument(
        '''--validation_file''' , type=str , default=None , help='''A csv or a json file containing the validation data.''' )
    parser.add_argument(
        '''--max_length''' , type=int , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
    parser.add_argument(
        '''--num_beams''' , type=int , default=None , help=(
            '''Number of beams to use for evaluation. This argument will be '''
            '''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
        ) , )
    parser.add_argument(
        '''--model_name_or_path''' , type=str , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=True , )
    parser.add_argument(
        '''--config_name''' , type=str , default=None , help='''Pretrained config name or path if not the same as model_name''' , )
    parser.add_argument(
        '''--device''' , type=str , default='''cpu''' , help='''Device where the model will be run''' , )
    parser.add_argument('''--output_file_path''' , type=str , default=None , help='''Where to store the final ONNX file.''' )
    args = parser.parse_args()
    return args
def load_model_tokenizer ( model_name , device="cpu" ):
    huggingface_model = model_dict[model_name].from_pretrained(model_name ).to(device )
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name )
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model ( model , tokenizer , onnx_file_path , num_beams , max_length ):
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = '''My friends are cool but they eat too many carbs.'''
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=10_24 , return_tensors='''pt''' ).to(model.device )
        summary_ids = model.generate(
            inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=num_beams , max_length=max_length , early_stopping=True , decoder_start_token_id=model.config.decoder_start_token_id , )
        torch.onnx.export(
            bart_script_model , (
                inputs['''input_ids'''],
                inputs['''attention_mask'''],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) , onnx_file_path , opset_version=14 , input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={
                '''input_ids''': {0: '''batch''', 1: '''seq'''},
                '''output_ids''': {0: '''batch''', 1: '''seq_out'''},
            } , example_outputs=summary_ids , )
        logger.info('''Model exported to {}'''.format(onnx_file_path ) )
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )
        logger.info('''Deduplicated and optimized model written to {}'''.format(new_onnx_file_path ) )
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        ort_out = ort_sess.run(
            None , {
                '''input_ids''': inputs['''input_ids'''].cpu().numpy(),
                '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
                '''num_beams''': np.array(num_beams ),
                '''max_length''': np.array(max_length ),
                '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
            } , )
        np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
        logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
        logger.info('''Success.''' )
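# Hedged usage sketch (flags are the ones defined in parse_args above; the
# script filename is an assumption):
#     python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#         --output_file_path BART.onnx --device cpu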
def main() -> List[str]:
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device )
    model , tokenizer = load_model_tokenizer(args.model_name_or_path , device )
    if model.config.decoder_start_token_id is None:
        raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
    model.to(device )
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = '''BART.onnx'''
    logger.info('''Exporting model to ONNX''' )
    export_and_validate_model(model , tokenizer , output_name , num_beams , max_length )
if __name__ == "__main__":
    main()
 | 191 |
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=13_37 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list ( split_dict : SplitDict ) -> Optional[int]:
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
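# For reference (shape inferred from the round-trip above): `_to_yaml_list`
# emits one plain dict per split, e.g.
#     [{"name": "train", "num_bytes": 1337, "num_examples": 42}]
# and `_from_yaml_list` rebuilds an equivalent SplitDict from that list.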
@pytest.mark.parametrize(
    '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='''my_dataset''' )] )
def test_split_dict_asdict_has_dataset_name ( split_info : SplitInfo ) -> Any:
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'''train''': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
 | 191 | 1 |
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig ( PretrainedConfig ):
    model_type = 'tapas'
    def __init__( self ,vocab_size=3_05_22 ,hidden_size=7_68 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=30_72 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=10_24 ,type_vocab_sizes=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,pad_token_id=0 ,positive_label_weight=10.0 ,num_aggregation_labels=0 ,aggregation_loss_weight=1.0 ,use_answer_as_supervision=None ,answer_loss_importance=1.0 ,use_normalized_answer_loss=False ,huber_loss_delta=None ,temperature=1.0 ,aggregation_temperature=1.0 ,use_gumbel_for_cells=False ,use_gumbel_for_aggregation=False ,average_approximation_function="ratio" ,cell_selection_preference=None ,answer_loss_cutoff=None ,max_num_rows=64 ,max_num_columns=32 ,average_logits_per_cell=False ,select_one_column=True ,allow_empty_column_selection=False ,init_cell_selection_weights_to_zero=False ,reset_position_index_per_cell=True ,disable_per_token_loss=False ,aggregation_labels=None ,no_aggregation_label_index=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels ,dict ):
            self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
| 350 |
def apply_table ( inp, table ):
    """simple docstring"""
    res = ''''''
    for i in table:
        res += inp[i - 1]
    return res
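# Worked example (illustrative): apply_table("1010", [2, 4, 3, 1]) picks the
# 2nd, 4th, 3rd and 1st bits (tables are 1-indexed), giving "0011".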
def left_shift ( data ):
    """simple docstring"""
    return data[1:] + data[0]
def xor ( a, b ):
    """simple docstring"""
    res = ''''''
    for i in range(len(b ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
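# Worked example (illustrative): xor("0101", "0011") == "0110" — a plain
# bitwise XOR over the two bit strings.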
def apply_sbox ( s, data ):
    """simple docstring"""
    row = int('''0b''' + data[0] + data[-1], 2 )
    col = int('''0b''' + data[1:3], 2 )
    return bin(s[row][col] )[2:]
def function ( expansion, s0, s1, key, message ):
    """simple docstring"""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion )
    temp = xor(temp, key )
    l = apply_sbox(s0, temp[:4] )  # noqa: E741
    r = apply_sbox(s1, temp[4:] )
    l = "0" * (2 - len(l )) + l  # noqa: E741
    r = "0" * (2 - len(r )) + r
    temp = apply_table(l + r, p4_table )
    temp = xor(left, temp )
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 330 | 0 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests ( unittest.TestCase ):
    @property
    def dummy_uncond_unet(self ):
        """simple docstring"""
        torch.manual_seed(0 )
        unet = UNet2DModel(
            block_out_channels=(32, 64),layers_per_block=2,sample_size=32,in_channels=3,out_channels=3,down_block_types=("DownBlock2D", "AttnDownBlock2D"),up_block_types=("AttnUpBlock2D", "UpBlock2D"),)
        return unet
    def test_inference(self ):
        """simple docstring"""
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet,scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator,num_inference_steps=20,output_type="numpy" ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pndm(generator=generator,num_inference_steps=20,output_type="numpy",return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class PNDMPipelineIntegrationTests ( unittest.TestCase ):
    def test_inference_cifar10(self ):
        """simple docstring"""
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet,scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator,output_type="numpy" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
 | 18 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class a__ ( unittest.TestCase ):
    def test_flatten_dict(self ):
        """simple docstring"""
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict ),expected_dict )
    def test_transpose_numpy(self ):
        """simple docstring"""
        x = np.random.randn(3,4 )
        self.assertTrue(np.allclose(transpose(x ),x.transpose() ) )
        x = np.random.randn(3,4,5 )
        self.assertTrue(np.allclose(transpose(x,axes=(1, 2, 0) ),x.transpose((1, 2, 0) ) ) )
    @require_torch
    def test_transpose_torch(self ):
        """simple docstring"""
        x = np.random.randn(3,4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(transpose(x ),transpose(t ).numpy() ) )
        x = np.random.randn(3,4,5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(transpose(x,axes=(1, 2, 0) ),transpose(t,axes=(1, 2, 0) ).numpy() ) )
    @require_tf
    def test_transpose_tf(self ):
        """simple docstring"""
        x = np.random.randn(3,4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(transpose(x ),transpose(t ).numpy() ) )
        x = np.random.randn(3,4,5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(transpose(x,axes=(1, 2, 0) ),transpose(t,axes=(1, 2, 0) ).numpy() ) )
    @require_flax
    def test_transpose_flax(self ):
        """simple docstring"""
        x = np.random.randn(3,4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(transpose(x ),np.asarray(transpose(t ) ) ) )
        x = np.random.randn(3,4,5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(transpose(x,axes=(1, 2, 0) ),np.asarray(transpose(t,axes=(1, 2, 0) ) ) ) )
    def test_reshape_numpy(self ):
        """simple docstring"""
        x = np.random.randn(3,4 )
        self.assertTrue(np.allclose(reshape(x,(4, 3) ),np.reshape(x,(4, 3) ) ) )
        x = np.random.randn(3,4,5 )
        self.assertTrue(np.allclose(reshape(x,(12, 5) ),np.reshape(x,(12, 5) ) ) )
    @require_torch
    def test_reshape_torch(self ):
        """simple docstring"""
        x = np.random.randn(3,4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(reshape(x,(4, 3) ),reshape(t,(4, 3) ).numpy() ) )
        x = np.random.randn(3,4,5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(reshape(x,(12, 5) ),reshape(t,(12, 5) ).numpy() ) )
    @require_tf
    def test_reshape_tf(self ):
        """simple docstring"""
        x = np.random.randn(3,4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(reshape(x,(4, 3) ),reshape(t,(4, 3) ).numpy() ) )
        x = np.random.randn(3,4,5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(reshape(x,(12, 5) ),reshape(t,(12, 5) ).numpy() ) )
    @require_flax
    def test_reshape_flax(self ):
        """simple docstring"""
        x = np.random.randn(3,4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(reshape(x,(4, 3) ),np.asarray(reshape(t,(4, 3) ) ) ) )
        x = np.random.randn(3,4,5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(reshape(x,(12, 5) ),np.asarray(reshape(t,(12, 5) ) ) ) )
    def test_squeeze_numpy(self ):
        """simple docstring"""
        x = np.random.randn(1,3,4 )
        self.assertTrue(np.allclose(squeeze(x ),np.squeeze(x ) ) )
        x = np.random.randn(1,4,1,5 )
        self.assertTrue(np.allclose(squeeze(x,axis=2 ),np.squeeze(x,axis=2 ) ) )
    @require_torch
    def test_squeeze_torch(self ):
        """simple docstring"""
        x = np.random.randn(1,3,4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(squeeze(x ),squeeze(t ).numpy() ) )
        x = np.random.randn(1,4,1,5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(squeeze(x,axis=2 ),squeeze(t,axis=2 ).numpy() ) )
    @require_tf
    def test_squeeze_tf(self ):
        """simple docstring"""
        x = np.random.randn(1,3,4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(squeeze(x ),squeeze(t ).numpy() ) )
        x = np.random.randn(1,4,1,5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(squeeze(x,axis=2 ),squeeze(t,axis=2 ).numpy() ) )
    @require_flax
    def test_squeeze_flax(self ):
        """simple docstring"""
        x = np.random.randn(1,3,4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(squeeze(x ),np.asarray(squeeze(t ) ) ) )
        x = np.random.randn(1,4,1,5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(squeeze(x,axis=2 ),np.asarray(squeeze(t,axis=2 ) ) ) )
    def test_expand_dims_numpy(self ):
        """simple docstring"""
        x = np.random.randn(3,4 )
        self.assertTrue(np.allclose(expand_dims(x,axis=1 ),np.expand_dims(x,axis=1 ) ) )
    @require_torch
    def test_expand_dims_torch(self ):
        """simple docstring"""
        x = np.random.randn(3,4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(expand_dims(x,axis=1 ),expand_dims(t,axis=1 ).numpy() ) )
    @require_tf
    def test_expand_dims_tf(self ):
        """simple docstring"""
        x = np.random.randn(3,4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(expand_dims(x,axis=1 ),expand_dims(t,axis=1 ).numpy() ) )
    @require_flax
    def test_expand_dims_flax(self ):
        """simple docstring"""
        x = np.random.randn(3,4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(expand_dims(x,axis=1 ),np.asarray(expand_dims(t,axis=1 ) ) ) )
| 18 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput ( ModelOutput ):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig ( XLMRobertaConfig ):
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=5_1_2 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Optional[Any] = [R"""pooler""", R"""logit_scale"""]
_lowerCamelCase : List[Any] = [R"""position_ids""", R"""predictions.decoder.bias"""]
_lowerCamelCase : Optional[Any] = """roberta"""
_lowerCamelCase : List[str] = RobertaSeriesConfig
def __init__( self : List[Any] , snake_case_ : Any ):
super().__init__(snake_case_ )
_UpperCAmelCase = XLMRobertaModel(snake_case_ )
_UpperCAmelCase = nn.Linear(config.hidden_size , config.project_dim )
_UpperCAmelCase = getattr(snake_case_ , "has_pre_transformation" , snake_case_ )
if self.has_pre_transformation:
_UpperCAmelCase = nn.Linear(config.hidden_size , config.project_dim )
_UpperCAmelCase = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def lowercase ( self : Union[str, Any] , snake_case_ : Optional[torch.Tensor] = None , snake_case_ : Optional[torch.Tensor] = None , snake_case_ : Optional[torch.Tensor] = None , snake_case_ : Optional[torch.Tensor] = None , snake_case_ : Optional[torch.Tensor] = None , snake_case_ : Optional[torch.Tensor] = None , snake_case_ : Optional[torch.Tensor] = None , snake_case_ : Optional[torch.Tensor] = None , snake_case_ : Optional[bool] = None , snake_case_ : Optional[bool] = None , snake_case_ : Optional[bool] = None , ):
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = self.base_model(
input_ids=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , position_ids=snake_case_ , head_mask=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , output_attentions=snake_case_ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=snake_case_ , )
if self.has_pre_transformation:
_UpperCAmelCase = outputs["hidden_states"][-2]
_UpperCAmelCase = self.pre_LN(snake_case_ )
_UpperCAmelCase = self.transformation_pre(snake_case_ )
return TransformationModelOutput(
projection_state=snake_case_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
_UpperCAmelCase = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=snake_case_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
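# Minimal usage sketch (added; illustrative only — instantiating the full XLM-R
# encoder is heavy, so this is left as a comment):
#
#     config = RobertaSeriesConfig(project_dim=512, pooler_fn="cls")
#     model = RobertaSeriesModelWithTransformation(config)
#     out = model(input_ids=input_ids, attention_mask=attention_mask)
#     text_embeddings = out.projection_state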
| 156 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]) -> List[int]:
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s: str) -> str:
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
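if __name__ == "__main__":
    # Added quick demonstration of the scoring helpers above:
    assert normalize_answer("The Cat sat.") == "cat sat"
    assert exact_match_score("The Cat sat.", "the cat sat")
    assert calculate_exact_match(["The Cat sat."], ["the cat sat"]) == {"em": 1.0}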
| 156 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 17 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
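# Added usage sketch (illustrative assumptions: `optimizer` and `lr_scheduler` are the
# objects returned by `Accelerator.prepare`, which attach `step_was_skipped`):
#
#     scheduler = AcceleratedScheduler(lr_scheduler, optimizer, step_with_optimizer=True)
#     for batch in dataloader:
#         ...
#         optimizer.step()
#         scheduler.step()  # steps num_processes times unless split_batches=True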
| 325 | 0 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase__ : Dict = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
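# Example invocation (paths are placeholders):
#     python convert_reformer_trax_checkpoint_to_pytorch.py \
#         --trax_model_pkl_path /path/to/model.pkl \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_model.bin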
| 180 |
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
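    # Added sanity checks:
    assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
    assert prime_factors(1) == []
    assert prime_factors(97) == [97]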
| 180 | 1 |
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
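    # Added sanity check (a sketch): the divide-and-conquer base case agrees with a
    # naive triple-loop multiply.
    def naive_multiply(a, b):
        return [
            [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
            for i in range(len(a))
        ]

    assert actual_strassen([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == naive_multiply(
        [[1, 2], [3, 4]], [[5, 6], [7, 8]]
    )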
| 327 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 306 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a_ : List[Any] = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 350 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 6 | 0 |
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index at which `pattern` starts in `s` (naive O(n*m) scan)."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
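    # Added examples: overlapping matches are all reported, and a miss yields [].
    assert naive_pattern_search("AAAA", "AA") == [0, 1, 2]
    assert naive_pattern_search("ABC", "XYZ") == []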
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
 | 210 | 
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
 | 210 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Any = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 313 |
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    # NOTE: the class and field names here are reconstructed; the defaults mirror
    # `datasets`' DownloadConfig, so treat the names as an assumption.
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
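# Added usage sketch (field names above are reconstructed, so treat this as illustrative):
#
#     config = DownloadConfig(force_download=True, max_retries=3)
#     fresh = config.copy()  # deep-copied, independent instance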
| 313 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
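# Added usage sketch: aligning the template with concrete dataset features.
#
#     features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = ImageClassification().align_with_features(features)
#     assert task.label_schema["labels"].names == ["cat", "dog"]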
 | 118 | 
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break

                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)

                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
| 118 | 1 |
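The retry loop in the JSON reader above is its most interesting part: when a record straddles two Arrow parse blocks, pyarrow raises ArrowInvalid with "straddling" in the message, and the reader doubles block_size and tries again. A minimal standalone sketch of that pattern, assuming pyarrow is installed (the function name and default are illustrative, not the library's API):

import io

import pyarrow as pa
import pyarrow.json as paj

def read_json_lines(data: bytes, block_size: int = 16 << 10) -> pa.Table:
    """Parse newline-delimited JSON bytes, doubling block_size on straddle errors."""
    while True:
        try:
            return paj.read_json(
                io.BytesIO(data), read_options=paj.ReadOptions(block_size=block_size)
            )
        except pa.ArrowInvalid as e:
            # A record crossing a block boundary is recoverable: retry with a bigger block.
            if "straddling" not in str(e) or block_size > len(data):
                raise
            block_size *= 2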
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[Any] = VideoToVideoSDPipeline
UpperCAmelCase__ : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'}) - {'image', 'width', 'height'}
UpperCAmelCase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'}) - {'image'}
UpperCAmelCase__ : Dict = PipelineTesterMixin.required_optional_params - {'latents'}
UpperCAmelCase__ : Optional[int] = False
# No `output_type`.
UpperCAmelCase__ : Union[str, Any] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
])
def lowerCAmelCase__ ( self: str ):
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
__lowerCamelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
__lowerCamelCase = CLIPTextModel(UpperCamelCase_ )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any]=0 ):
# 3 frames
__lowerCamelCase = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""video""": video,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = VideoToVideoSDPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = """np"""
__lowerCamelCase = sd_pipe(**UpperCamelCase_ ).frames
__lowerCamelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__lowerCamelCase = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self: str ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCamelCase_ , expected_max_diff=5E-3 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCAmelCase__ ( self: Tuple ):
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCAmelCase__ ( self: Tuple ):
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def lowerCAmelCase__ ( self: Tuple ):
pass
def lowerCAmelCase__ ( self: str ):
return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__lowerCamelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCamelCase = torch.randn((1, 10, 3, 10_24, 5_76) , generator=UpperCamelCase_ )
__lowerCamelCase = video.to("""cuda""" )
__lowerCamelCase = """Spiderman is surfing"""
__lowerCamelCase = pipe(UpperCamelCase_ , video=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=3 , output_type="""pt""" ).frames
__lowerCamelCase = np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 353 |
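One detail worth calling out in the get_dummy_inputs pattern above: torch.Generator(device=...) is not supported on "mps", so such tests fall back to the global torch.manual_seed, which returns the default generator. A minimal sketch of that device-aware helper, assuming a working torch install (the helper name is mine):

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        # mps has no per-device generator; seed and return the global one
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)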
UpperCAmelCase_ = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
UpperCAmelCase_ = ['a', 'b', 'c', 'd', 'e']
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : str ):
'''simple docstring'''
__lowerCamelCase = start
# add current to visited
visited.append(A__ )
__lowerCamelCase = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__lowerCamelCase = topological_sort(A__ , A__ , A__ )
# if all neighbors visited add current to sort
sort.append(A__ )
# if all vertices haven't been visited select a new one to visit
if len(A__ ) != len(A__ ):
for vertice in vertices:
if vertice not in visited:
__lowerCamelCase = topological_sort(A__ , A__ , A__ )
# return sort
return sort
if __name__ == "__main__":
UpperCAmelCase_ = topological_sort('a', [], [])
print(sort)
| 29 | 0 |
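For reference, a de-obfuscated, runnable rendering of the DFS topological sort above (mapping the mangled names back to sort/visited is an assumption). Note the function builds a post-order, so a topological order is its reverse:

def topo_sort(current, visited, order, edges):
    visited.append(current)
    for neighbor in edges[current]:
        if neighbor not in visited:
            topo_sort(neighbor, visited, order, edges)
    order.append(current)  # post-order: appended after all descendants
    return order

edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
visited = []
order = topo_sort("a", visited, [], edges)
for vertex in ["a", "b", "c", "d", "e"]:
    if vertex not in visited:  # pick up any vertices the first DFS missed
        topo_sort(vertex, visited, order, edges)
print(order[::-1])  # ['a', 'b', 'e', 'd', 'c'] is one valid topological order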
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case : int = logging.get_logger(__name__)
snake_case : Optional[int] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
snake_case : str = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
snake_case : List[str] = {'''facebook/blenderbot-3B''': 1_28}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __lowercase ( ):
a__ = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
a__ = bs[:]
a__ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__lowerCAmelCase )
cs.append(2**8 + n )
n += 1
a__ = [chr(__lowerCAmelCase ) for n in cs]
return dict(zip(__lowerCAmelCase , __lowerCAmelCase ) )
def __lowercase ( __lowerCAmelCase : str ):
a__ = set()
a__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
a__ = char
return pairs
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Tuple = VOCAB_FILES_NAMES
UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : int = ['''input_ids''', '''attention_mask''']
def __init__( self :Optional[Any] ,__snake_case :List[Any] ,__snake_case :List[Any] ,__snake_case :Optional[Any]="replace" ,__snake_case :Tuple="<s>" ,__snake_case :List[str]="</s>" ,__snake_case :List[Any]="</s>" ,__snake_case :Optional[int]="<s>" ,__snake_case :str="<unk>" ,__snake_case :Tuple="<pad>" ,__snake_case :str="<mask>" ,__snake_case :List[str]=False ,**__snake_case :Tuple ,) -> str:
a__ = AddedToken(__snake_case ,lstrip=__snake_case ,rstrip=__snake_case ) if isinstance(__snake_case ,__snake_case ) else bos_token
a__ = AddedToken(__snake_case ,lstrip=__snake_case ,rstrip=__snake_case ) if isinstance(__snake_case ,__snake_case ) else eos_token
a__ = AddedToken(__snake_case ,lstrip=__snake_case ,rstrip=__snake_case ) if isinstance(__snake_case ,__snake_case ) else sep_token
a__ = AddedToken(__snake_case ,lstrip=__snake_case ,rstrip=__snake_case ) if isinstance(__snake_case ,__snake_case ) else cls_token
a__ = AddedToken(__snake_case ,lstrip=__snake_case ,rstrip=__snake_case ) if isinstance(__snake_case ,__snake_case ) else unk_token
a__ = AddedToken(__snake_case ,lstrip=__snake_case ,rstrip=__snake_case ) if isinstance(__snake_case ,__snake_case ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
a__ = AddedToken(__snake_case ,lstrip=__snake_case ,rstrip=__snake_case ) if isinstance(__snake_case ,__snake_case ) else mask_token
super().__init__(
errors=__snake_case ,bos_token=__snake_case ,eos_token=__snake_case ,unk_token=__snake_case ,sep_token=__snake_case ,cls_token=__snake_case ,pad_token=__snake_case ,mask_token=__snake_case ,add_prefix_space=__snake_case ,**__snake_case ,)
with open(__snake_case ,encoding='utf-8' ) as vocab_handle:
a__ = json.load(__snake_case )
a__ = {v: k for k, v in self.encoder.items()}
a__ = errors # how to handle errors in decoding
a__ = bytes_to_unicode()
a__ = {v: k for k, v in self.byte_encoder.items()}
with open(__snake_case ,encoding='utf-8' ) as merges_handle:
a__ = merges_handle.read().split('\n' )[1:-1]
a__ = [tuple(merge.split() ) for merge in bpe_merges]
a__ = dict(zip(__snake_case ,range(len(__snake_case ) ) ) )
a__ = {}
a__ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
a__ = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
return len(self.encoder )
def lowerCamelCase__( self :Any ) -> int:
return dict(self.encoder ,**self.added_tokens_encoder )
def lowerCamelCase__( self :Dict ,__snake_case :Optional[Any] ) -> Optional[int]:
if token in self.cache:
return self.cache[token]
a__ = tuple(__snake_case )
a__ = get_pairs(__snake_case )
if not pairs:
return token
while True:
a__ = min(__snake_case ,key=lambda __snake_case : self.bpe_ranks.get(__snake_case ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
a__ , a__ = bigram
a__ = []
a__ = 0
while i < len(__snake_case ):
try:
a__ = word.index(__snake_case ,__snake_case )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
a__ = j
if word[i] == first and i < len(__snake_case ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a__ = tuple(__snake_case )
a__ = new_word
if len(__snake_case ) == 1:
break
else:
a__ = get_pairs(__snake_case )
a__ = ' '.join(__snake_case )
a__ = word
return word
def lowerCamelCase__( self :Any ,__snake_case :Any ) -> Tuple:
a__ = []
for token in re.findall(self.pat ,__snake_case ):
a__ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__snake_case ).split(' ' ) )
return bpe_tokens
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[str] ) -> Any:
return self.encoder.get(__snake_case ,self.encoder.get(self.unk_token ) )
def lowerCamelCase__( self :List[str] ,__snake_case :Optional[int] ) -> Optional[int]:
return self.decoder.get(__snake_case )
def lowerCamelCase__( self :List[str] ,__snake_case :Dict ) -> List[Any]:
a__ = ''.join(__snake_case )
a__ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' ,errors=self.errors )
return text
def lowerCamelCase__( self :int ,__snake_case :str ,__snake_case :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a__ = os.path.join(
__snake_case ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
a__ = os.path.join(
__snake_case ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(__snake_case ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=__snake_case ,ensure_ascii=__snake_case ) + '\n' )
a__ = 0
with open(__snake_case ,'w' ,encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda __snake_case : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
a__ = token_index
writer.write(' '.join(__snake_case ) + '\n' )
index += 1
return vocab_file, merge_file
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ,__snake_case :bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case ,token_ids_a=__snake_case ,already_has_special_tokens=__snake_case )
if token_ids_a is None:
return [1] + ([0] * len(__snake_case )) + [1]
return [1] + ([0] * len(__snake_case )) + [1, 1] + ([0] * len(__snake_case )) + [1]
def lowerCamelCase__( self :Any ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ) -> List[int]:
a__ = [self.sep_token_id]
a__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__( self :str ,__snake_case :List[Any] ,__snake_case :Dict=False ,**__snake_case :Optional[int] ) -> List[str]:
a__ = kwargs.pop('add_prefix_space' ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__snake_case ) > 0 and not text[0].isspace()):
a__ = ' ' + text
return (text, kwargs)
def lowerCamelCase__( self :str ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ) -> str:
return token_ids_a + [self.eos_token_id]
def lowerCamelCase__( self :Dict ,__snake_case :"Conversation" ) -> List[int]:
a__ = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix a space, as is done within Blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(__snake_case )
a__ = ' '.join(__snake_case )
a__ = self.encode(__snake_case )
if len(__snake_case ) > self.model_max_length:
a__ = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
| 240 |
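The tokenizer above drives its BPE merge loop with the get_pairs helper; a quick standalone demonstration of what it computes (illustrative only):

def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (tuple of symbols)."""
    pairs = set()
    prev = word[0]
    for ch in word[1:]:
        pairs.add((prev, ch))
        prev = ch
    return pairs

print(get_pairs(("h", "e", "l", "l", "o")))
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')} -- the candidates for the next BPE merge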
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def __lowercase ( ):
print('Making key files...' )
make_key_files('rsa' , 1_0_2_4 )
print('Key files generation successful.' )
def __lowercase ( __lowerCAmelCase : int ):
print('Generating prime p...' )
a__ = rabinMiller.generate_large_prime(__lowerCAmelCase )
print('Generating prime q...' )
a__ = rabinMiller.generate_large_prime(__lowerCAmelCase )
a__ = p * q
print('Generating e that is relatively prime to (p - 1) * (q - 1)...' )
while True:
a__ = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(__lowerCAmelCase , (p - 1) * (q - 1) ) == 1:
break
print('Calculating d that is mod inverse of e...' )
a__ = cryptoMath.find_mod_inverse(__lowerCAmelCase , (p - 1) * (q - 1) )
a__ = (n, e)
a__ = (n, d)
return (public_key, private_key)
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : int ):
if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ):
print('\nWARNING:' )
print(
F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'Use a different name or delete these files and re-run this program.' )
sys.exit()
a__ , a__ = generate_key(__lowerCAmelCase )
print(F'\nWriting public key to file {name}_pubkey.txt...' )
with open(F'{name}_pubkey.txt' , 'w' ) as out_file:
out_file.write(F'{key_size},{public_key[0]},{public_key[1]}' )
print(F'Writing private key to file {name}_privkey.txt...' )
with open(F'{name}_privkey.txt' , 'w' ) as out_file:
out_file.write(F'{key_size},{private_key[0]},{private_key[1]}' )
if __name__ == "__main__":
main()
| 240 | 1 |
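The key relationship the generator above relies on is d = e^-1 mod (p-1)(q-1). The same arithmetic with tiny textbook primes, using math.gcd and pow in place of the sample's cryptomath helpers (Python 3.8+ for the modular inverse; not for real use):

from math import gcd

p, q = 61, 53
n = p * q                   # 3233
phi = (p - 1) * (q - 1)     # 3120
e = 17
assert gcd(e, phi) == 1     # e must be coprime to (p - 1) * (q - 1)
d = pow(e, -1, phi)         # modular inverse of e, here 2753
message = 42
cipher = pow(message, e, n)
assert pow(cipher, d, n) == message  # decryption recovers the plaintext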
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
class UpperCamelCase ( lowercase_ ):
def __init__( self ,**__UpperCamelCase ) -> int:
'''simple docstring'''
requires_backends(self ,['bs4'] )
super().__init__(**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : Union[str, Any] = []
lowercase_ : int = []
lowercase_ : List[Any] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
lowercase_ : Optional[int] = parent.find_all(child.name ,recursive=__UpperCamelCase )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__UpperCamelCase ) else next(i for i, s in enumerate(__UpperCamelCase ,1 ) if s is child ) )
lowercase_ : Any = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ : List[str] = BeautifulSoup(__UpperCamelCase ,'html.parser' )
lowercase_ : List[Any] = []
lowercase_ : List[str] = []
lowercase_ : Optional[Any] = []
for element in html_code.descendants:
if type(__UpperCamelCase ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
lowercase_ : Dict = html.unescape(__UpperCamelCase ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__UpperCamelCase )
lowercase_ , lowercase_ : str = self.xpath_soup(__UpperCamelCase )
stringaxtag_seq.append(__UpperCamelCase )
stringaxsubs_seq.append(__UpperCamelCase )
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
raise ValueError('Number of doc strings and xtags does not correspond' )
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
raise ValueError('Number of doc strings and xsubs does not correspond' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : Any = ''
for tagname, subs in zip(__UpperCamelCase ,__UpperCamelCase ):
xpath += f'''/{tagname}'''
if subs != 0:
xpath += f'''[{subs}]'''
return xpath
def __call__( self ,__UpperCamelCase ) -> BatchFeature:
'''simple docstring'''
lowercase_ : Optional[int] = False
# Check that strings has a valid type
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : Optional[Any] = True
elif isinstance(__UpperCamelCase ,(list, tuple) ):
if len(__UpperCamelCase ) == 0 or isinstance(html_strings[0] ,__UpperCamelCase ):
lowercase_ : Tuple = True
if not valid_strings:
raise ValueError(
'HTML strings must of type `str`, `List[str]` (batch of examples), '
f'''but is of type {type(__UpperCamelCase )}.''' )
lowercase_ : Optional[Any] = bool(isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(html_strings[0] ,__UpperCamelCase )) )
if not is_batched:
lowercase_ : Any = [html_strings]
# Get nodes + xpaths
lowercase_ : int = []
lowercase_ : Tuple = []
for html_string in html_strings:
lowercase_ , lowercase_ , lowercase_ : Optional[int] = self.get_three_from_single(__UpperCamelCase )
nodes.append(__UpperCamelCase )
lowercase_ : Optional[int] = []
for node, tag_list, sub_list in zip(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : Tuple = self.construct_xpath(__UpperCamelCase ,__UpperCamelCase )
xpath_strings.append(__UpperCamelCase )
xpaths.append(__UpperCamelCase )
# return as Dict
lowercase_ : Dict = {'nodes': nodes, 'xpaths': xpaths}
lowercase_ : int = BatchFeature(data=__UpperCamelCase ,tensor_type=__UpperCamelCase )
return encoded_inputs
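The xpath_soup / construct_xpath pair above boils down to walking a node's parents and recording each tag's 1-based index among same-named siblings. A compact standalone sketch of that walk, assuming bs4 is installed (the helper name is mine):

from bs4 import BeautifulSoup

def xpath_of(element):
    parts = []
    child = element if element.name else element.parent
    for parent in child.parents:
        siblings = parent.find_all(child.name, recursive=False)
        # subscript 0 means "only child of this name" and is rendered without brackets
        sub = 0 if len(siblings) == 1 else next(i for i, s in enumerate(siblings, 1) if s is child)
        parts.append(f"/{child.name}" + (f"[{sub}]" if sub else ""))
        child = parent
    return "".join(reversed(parts))

soup = BeautifulSoup("<html><body><p>a</p><p>b</p></body></html>", "html.parser")
print(xpath_of(soup.find_all("p")[1]))  # /html/body/p[2]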
| 321 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ):
lowercase_ : int = 'backbone.' if is_semantic else ''
lowercase_ : List[str] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ):
for i in range(config.num_hidden_layers ):
lowercase_ : Any = 'backbone.' if is_semantic else ''
# queries, keys and values
lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' )
lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' )
lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' )
lowercase_ : List[str] = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : List[str] = q_bias
lowercase_ : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Any = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : Any = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' )
lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' )
lowercase_ : Tuple = gamma_a
lowercase_ : List[Any] = gamma_a
def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ):
lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = val
def lowercase__( ):
lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ):
lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True
lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowercase_ : Any = 10_24
lowercase_ : List[str] = 40_96
lowercase_ : Tuple = 24
lowercase_ : Union[str, Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
lowercase_ : Optional[Any] = 16
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : int = 'rvlcdip-id2label.json'
lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase_ : str = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE )
# load HuggingFace model
lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.eval()
model.load_state_dict(__SCREAMING_SNAKE_CASE )
# Check outputs on an image
lowercase_ : List[Any] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE )
lowercase_ : str = prepare_img()
lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
lowercase_ : int = encoding['pixel_values']
lowercase_ : Any = model(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = outputs.logits
# verify logits
lowercase_ : Optional[Any] = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92]
assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected"
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
if has_lm_head:
lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , )
model.push_to_hub(
repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 321 | 1 |
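A note on the conversion idiom above: rename_key is just a pop-and-reinsert over the checkpoint's state dict, applied once per (src, dest) pair produced by create_rename_keys. In miniature:

def rename_key(state_dict, old, new):
    state_dict[new] = state_dict.pop(old)  # move the tensor under its new name

sd = {"blocks.0.norm1.weight": "tensor..."}
rename_key(sd, "blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight")
print(sd)  # {'beit.encoder.layer.0.layernorm_before.weight': 'tensor...'}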
'''simple docstring'''
_A : List[str] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(UpperCamelCase , UpperCamelCase ):
lowerCamelCase__ : Dict = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(UpperCamelCase )
lowerCamelCase__ : int = """""".join(bin(UpperCamelCase )[2:].zfill(8 ) for byte in data )
lowerCamelCase__ : int = len(UpperCamelCase ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowerCamelCase__ : Optional[Any] = b"""=""" * ((6 - len(UpperCamelCase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(UpperCamelCase ) % 6)
else:
lowerCamelCase__ : Optional[int] = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(UpperCamelCase ) , 6 ) ).encode()
+ padding
)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(UpperCamelCase , UpperCamelCase ) and not isinstance(UpperCamelCase , UpperCamelCase ):
lowerCamelCase__ : int = (
"""argument should be a bytes-like object or ASCII string, """
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(UpperCamelCase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(UpperCamelCase , UpperCamelCase ):
try:
lowerCamelCase__ : str = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
lowerCamelCase__ : Tuple = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowerCamelCase__ : Any = encoded_data[:-padding]
lowerCamelCase__ : int = """""".join(
bin(B64_CHARSET.index(UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowerCamelCase__ : Tuple = """""".join(
bin(B64_CHARSET.index(UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )
lowerCamelCase__ : List[Any] = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(UpperCamelCase ) , 8 )
]
return bytes(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41 |
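The hand-rolled codec above should round-trip and agree with the standard library; a quick reference check using stdlib only:

import base64

data = b"Hello, World!"
encoded = base64.b64encode(data)
assert encoded == b"SGVsbG8sIFdvcmxkIQ=="
assert base64.b64decode(encoded) == data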
import numpy as np
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = (0, 0)
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : int = 0
__UpperCAmelCase : Union[str, Any] = 0
__UpperCAmelCase : List[Any] = 0
def __eq__( self : Optional[int] , a_ : List[Any] ):
'''simple docstring'''
return self.position == cell.position
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
print(self.position )
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : str , a_ : Dict=(5, 5) ):
'''simple docstring'''
__UpperCAmelCase : str = np.zeros(a_ )
__UpperCAmelCase : List[Any] = world_size[0]
__UpperCAmelCase : List[str] = world_size[1]
def snake_case__ ( self : Tuple ):
'''simple docstring'''
print(self.w )
def snake_case__ ( self : Union[str, Any] , a_ : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
__UpperCAmelCase : str = cell.position[0]
__UpperCAmelCase : List[Any] = cell.position[1]
__UpperCAmelCase : str = []
for n in neughbour_cord:
__UpperCAmelCase : Optional[Any] = current_x + n[0]
__UpperCAmelCase : Any = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
__UpperCAmelCase : int = Cell()
__UpperCAmelCase : List[str] = (x, y)
__UpperCAmelCase : List[Any] = cell
neighbours.append(a_ )
return neighbours
def a ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : Optional[Any] = []
_open.append(_UpperCAmelCase )
while _open:
__UpperCAmelCase : str = np.argmin([n.f for n in _open] )
__UpperCAmelCase : Optional[Any] = _open[min_f]
_closed.append(_open.pop(_UpperCAmelCase ) )
if current == goal:
break
for n in world.get_neigbours(_UpperCAmelCase ):
for c in _closed:
if c == n:
continue
__UpperCAmelCase : Optional[Any] = current.g + 1
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = n.position
__UpperCAmelCase , __UpperCAmelCase : Tuple = goal.position
__UpperCAmelCase : Union[str, Any] = (ya - ya) ** 2 + (xa - xa) ** 2
__UpperCAmelCase : Optional[Any] = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(_UpperCAmelCase )
__UpperCAmelCase : str = []
while current.parent is not None:
path.append(current.position )
__UpperCAmelCase : int = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
__A =Gridworld()
# Start position and goal
__A =Cell()
__A =(0, 0)
__A =Cell()
__A =(4, 4)
print(f'''path from {start.position} to {goal.position}''')
__A =astar(world, start, goal)
# Just for visual reasons.
for i in s:
__A =1
print(world.w)
| 226 | 0 |
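The astar loop above selects the next node by scanning the whole open list with np.argmin on f = g + h each iteration; a binary heap is the usual O(log n) alternative for that pop, sketched here:

import heapq

open_heap = []  # entries are (f, position) tuples, ordered by f
heapq.heappush(open_heap, (7.0, (0, 0)))
heapq.heappush(open_heap, (3.5, (1, 1)))
f, position = heapq.heappop(open_heap)
print(f, position)  # 3.5 (1, 1) -- the lowest-f node comes out first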
'''simple docstring'''
from collections.abc import Sequence
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
return sum(c * (x**i) for i, c in enumerate(UpperCAmelCase__ ) )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
A_ = 0.0
for coeff in reversed(UpperCAmelCase__ ):
A_ = result * x + coeff
return result
if __name__ == "__main__":
__lowerCamelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
__lowerCamelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 101 |
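Both evaluators above compute the same value; Horner's scheme just trades explicit powers of x for one multiply-add per coefficient. A quick equivalence check with the sample's polynomial at x = 10:

poly = (0.0, 0.0, 5.0, 9.3, 7.0)   # 5x^2 + 9.3x^3 + 7x^4
x = 10.0
direct = sum(c * x**i for i, c in enumerate(poly))
acc = 0.0
for coeff in reversed(poly):
    acc = acc * x + coeff
assert abs(direct - acc) < 1e-9
print(acc)  # 79800.0 (500 + 9300 + 70000), up to float rounding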
'''simple docstring'''
from __future__ import annotations
from typing import Any
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
if not postfix_notation:
return 0
A_ = {"""+""", """-""", """*""", """/"""}
A_ = []
for token in postfix_notation:
if token in operations:
A_ , A_ = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(UpperCAmelCase__ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101 | 1 |
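A worked run of the postfix evaluator above on "2 3 + 4 *": pop order yields b then a, so the deeper operand is the left-hand side. (The sample additionally nudges // toward zero for mixed-sign operands, which never triggers here.)

tokens = ["2", "3", "+", "4", "*"]
stack = []
for token in tokens:
    if token in {"+", "-", "*", "/"}:
        b, a = stack.pop(), stack.pop()  # a was pushed first
        stack.append({"+": a + b, "-": a - b, "*": a * b, "/": a // b}[token])
    else:
        stack.append(int(token))
print(stack.pop())  # (2 + 3) * 4 = 20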
def A (__A : float ) -> float:
"""simple docstring"""
return 10 - x * x
def A (__A : float , __A : float ) -> float:
"""simple docstring"""
if equation(__A ) * equation(__A ) >= 0:
raise ValueError('''Wrong space!''' )
UpperCAmelCase_ = a
while (b - a) >= 0.01:
# Find middle point
UpperCAmelCase_ = (a + b) / 2
# Check if middle point is root
if equation(__A ) == 0.0:
break
# Decide the side to repeat the steps
if equation(__A ) * equation(__A ) < 0:
UpperCAmelCase_ = c
else:
UpperCAmelCase_ = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 51 |
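The bisection sample above solves 10 - x^2 = 0 on a bracket where the signs differ; the positive root is sqrt(10) ≈ 3.1623, reached once the bracket shrinks below 0.01. Condensed, with de-obfuscated names:

import math

def f(x):
    return 10 - x * x

a, b = 0.0, 6.0                 # f(a) > 0 > f(b), so a root lies between
while (b - a) >= 0.01:
    c = (a + b) / 2
    if f(c) == 0.0:
        break
    if f(a) * f(c) < 0:
        b = c                   # sign change in [a, c]
    else:
        a = c                   # sign change in [c, b]
print(round(c, 2), round(math.sqrt(10), 2))  # both ~3.16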
from typing import Any
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> list:
"""simple docstring"""
_validation(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
    # Creates data structures and fills in the initial step
_lowercase ={}
_lowercase ={}
for state in states_space:
_lowercase =observations_space[0]
_lowercase =(
initial_probabilities[state] * emission_probabilities[state][observation]
)
_lowercase =None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__snake_case ) ):
_lowercase =observations_space[o]
_lowercase =observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
_lowercase =''''''
_lowercase =-1
for k_state in states_space:
_lowercase =(
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
_lowercase =probability
_lowercase =k_state
# Update probabilities and pointers dicts
_lowercase =(
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
_lowercase =arg_max
# The final observation
_lowercase =observations_space[len(__snake_case ) - 1]
# argmax for given final observation
_lowercase =''''''
_lowercase =-1
for k_state in states_space:
_lowercase =probabilities[(k_state, final_observation)]
if probability > max_probability:
_lowercase =probability
_lowercase =k_state
_lowercase =arg_max
# Process pointers backwards
_lowercase =last_state
_lowercase =[]
for o in range(len(__snake_case ) - 1 , -1 , -1 ):
result.append(__snake_case )
_lowercase =pointers[previous, observations_space[o]]
result.reverse()
return result
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None:
"""simple docstring"""
_validate_not_empty(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
_validate_lists(__snake_case , __snake_case )
_validate_dicts(
__snake_case , __snake_case , __snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
"""simple docstring"""
_validate_list(__snake_case , '''observations_space''' )
_validate_list(__snake_case , '''states_space''' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
"""simple docstring"""
if not isinstance(_object , __snake_case ):
_lowercase =F"{var_name} must be a list"
raise ValueError(__snake_case )
else:
for x in _object:
if not isinstance(__snake_case , __snake_case ):
_lowercase =F"{var_name} must be a list of strings"
raise ValueError(__snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , ) -> None:
"""simple docstring"""
_validate_dict(__snake_case , '''initial_probabilities''' , __snake_case )
_validate_nested_dict(__snake_case , '''transition_probabilities''' )
_validate_nested_dict(__snake_case , '''emission_probabilities''' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
"""simple docstring"""
_validate_dict(_object , __snake_case , __snake_case )
for x in _object.values():
_validate_dict(__snake_case , __snake_case , __snake_case , __snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case = False ) -> None:
"""simple docstring"""
if not isinstance(_object , __snake_case ):
_lowercase =F"{var_name} must be a dict"
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object ):
_lowercase =F"{var_name} all keys must be strings"
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ):
_lowercase ='''nested dictionary ''' if nested else ''''''
_lowercase =F"{var_name} {nested_text}all values must be {value_type.__name__}"
raise ValueError(__snake_case )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 5 | 0 |
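To see the recurrence above in action, the classic healthy/fever HMM makes a compact test input. This standalone re-implementation follows the same max-over-previous-states rule, but is not the sample's exact API:

observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start = {"Healthy": 0.6, "Fever": 0.4}
trans = {"Healthy": {"Healthy": 0.7, "Fever": 0.3},
         "Fever": {"Healthy": 0.4, "Fever": 0.6}}
emit = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}

probs = [{s: start[s] * emit[s][observations[0]] for s in states}]
back = [{}]
for obs in observations[1:]:
    row, ptr = {}, {}
    for s in states:
        # argmax over previous states of prob * transition * emission
        prev = max(states, key=lambda k: probs[-1][k] * trans[k][s])
        row[s] = probs[-1][prev] * trans[prev][s] * emit[s][obs]
        ptr[s] = prev
    probs.append(row)
    back.append(ptr)

best = max(states, key=lambda s: probs[-1][s])
path = [best]
for ptr in reversed(back[1:]):      # follow the backpointers
    path.append(ptr[path[-1]])
print(path[::-1])  # ['Healthy', 'Healthy', 'Fever']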
"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ):
def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(__SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ):
def count_of_possible_combinations_with_dp_array(
__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowercase_ : str = sum(
count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE )
for item in array )
lowercase_ : Tuple = answer
return answer
lowercase_ : Optional[Any] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ):
lowercase_ : Dict = [0] * (target + 1)
lowercase_ : Dict = 1
for i in range(1 , target + 1 ):
for j in range(__SCREAMING_SNAKE_CASE ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE =3
__SCREAMING_SNAKE_CASE =5
__SCREAMING_SNAKE_CASE =[1, 2, 5]
print(combination_sum_iv(n, array, target))
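All three variants above count ordered compositions; for array [1, 2, 5] and target 5 the script prints 9 (1+1+1+1+1, the four orderings of 1+1+1+2, the three orderings of 1+2+2, and 5 itself). The bottom-up core in isolation:

def count_compositions(target, array):
    dp = [0] * (target + 1)
    dp[0] = 1  # one way to reach 0: take nothing
    for i in range(1, target + 1):
        dp[i] = sum(dp[i - item] for item in array if i - item >= 0)
    return dp[target]

assert count_compositions(5, [1, 2, 5]) == 9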
| 321 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class UpperCamelCase :
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int:
'''simple docstring'''
return None
class UpperCamelCase :
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
'''simple docstring'''
return None
class UpperCamelCase ( unittest.TestCase ):
lowercase = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
from transformers import BertModel
lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(__UpperCamelCase ) )
vocab_file.flush()
lowercase_ : List[str] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) )
model.save_pretrained(__UpperCamelCase )
self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase )
@require_tf
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase )
lowercase_ : int = quantize(Path(__UpperCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase )
lowercase_ : Tuple = quantize(__UpperCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase )
return path
except Exception as e:
self.fail(__UpperCamelCase )
@require_torch
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
from transformers import BertModel
lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' )
@require_tf
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
from transformers import TFBertModel
lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase )
# Assert all variables are present
self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase )
self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids']
lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__UpperCamelCase ) ,3 )
# Should have exactly the same input names
self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__UpperCamelCase ) ,1 )
self.assertEqual(len(__UpperCamelCase ) ,1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] ,tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] ,'input_ids' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
| 321 | 1 |