from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
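# For context: _LazyModule defers every submodule import until an attribute is first
# accessed. A minimal sketch of that idea (not the actual transformers implementation):
import importlib
from types import ModuleType


class LazyModuleSketch(ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each public attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        # runs only for attributes not found the normal way, i.e. on first access
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)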
import functools


def minimum_tickets_cost(days: list[int], costs: list[int]) -> int:
    """Return the minimum cost to travel on every day in `days`, given
    1-day, 7-day and 30-day pass prices in `costs`."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
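# Example usage (a quick sanity check; this mirrors LeetCode problem 983's published
# example: a 7-day pass covering days 4-8 plus two 1-day passes costs 2 + 7 + 2 = 11):
print(minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11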
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
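# Usage sketch (Tool instances are callable, dispatching through encode/forward/decode;
# "photo.jpg" is a hypothetical local file):
from PIL import Image

tool = ImageQuestionAnsweringTool()
image = Image.open("photo.jpg")
print(tool(image, "What animal is in the picture?"))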
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of `successes` successes in `trials` Bernoulli trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
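# Hand check of the printed example; every factor is exactly representable in
# binary floating point, so the comparison is exact:
# C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375
assert binomial_distribution(2, 4, 0.75) == 0.2109375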
from __future__ import annotations
from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Apply the impedance formula Z = sqrt(R**2 + X**2) to solve for the one
    quantity (resistance, reactance or impedance) that is passed as 0."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
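# Example based on the classic 3-4-5 right triangle:
print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}
print(electrical_impedance(0, 4, 5))  # {'resistance': 3.0}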
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import datasets

from .evaluate import evaluate

_CITATION = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
_DESCRIPTION = '''
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
_KWARGS_DESCRIPTION = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """The variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
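# Example: a minimal denoising-loop sketch for the scheduler above. The score model is
# stood in by random noise (a real pipeline, e.g. diffusers' ScoreSdeVePipeline, calls a
# trained UNet and handles batching); the sample shape is illustrative only.
import torch

scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=10)
scheduler.set_sigmas(num_inference_steps=10)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for a score network call
    for _ in range(scheduler.config.correct_steps):
        sample = scheduler.step_correct(model_output, sample).prev_sample
    sample = scheduler.step_pred(model_output, t, sample).prev_sample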
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the language code of the new target language."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None  # the SentencePiece processor is not picklable
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
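# Usage sketch (assumes network access to the Hub checkpoint named above; the
# decoded text should roughly round-trip the input, up to casing/normalization):
tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
ids = tokenizer("hello world").input_ids  # prefix tokens + sentencepiece ids + </s>
print(tokenizer.decode(ids, skip_special_tokens=True))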
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask sizes
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
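# Self-contained check on synthetic data, since the demo above needs a local lena.jpg:
import numpy as np

noisy = (np.random.rand(64, 64) * 255).astype(np.uint8)
smoothed = gaussian_filter(noisy, 3, sigma=1)
print(smoothed.shape)  # (62, 62) - a "valid" convolution shrinks each side by k_size - 1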
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
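# Equivalent direct call, bypassing argparse; all three paths are hypothetical placeholders:
# convert_tf_checkpoint_to_pytorch("./bert_model.ckpt", "./bert_config.json", "./pytorch_model.bin")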
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import itertools
import math

def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
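# Quick checks: the first six primes are 2, 3, 5, 7, 11, 13, and the 10001st
# prime (the Project Euler problem 7 answer) is 104743.
assert solution(6) == 13
assert solution() == 104743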
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
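# Example (the backbone string is any timm model name; "resnet50" is just an illustration):
config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
print(config.model_type)  # "timm_backbone"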
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset

BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir


class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")

    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
lowercase_ : List[Any] = logging.get_logger(__name__)
# General docstring
lowercase_ : Tuple = '''MobileNetV1Config'''
# Base docstring
lowercase_ : Any = '''google/mobilenet_v1_1.0_224'''
lowercase_ : Any = [1, 1024, 7, 7]
# Image classification docstring
lowercase_ : Optional[int] = '''google/mobilenet_v1_1.0_224'''
lowercase_ : Dict = '''tabby, tabby cat'''
lowercase_ : Optional[int] = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ):
_snake_case : List[Any] = {}
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
        _snake_case : Dict = model.mobilenet_v1
else:
_snake_case : Tuple = model
_snake_case : List[Any] = 'MobilenetV1/Conv2d_0/'
_snake_case : Optional[int] = backbone.conv_stem.convolution.weight
_snake_case : Union[str, Any] = backbone.conv_stem.normalization.bias
_snake_case : Optional[int] = backbone.conv_stem.normalization.weight
_snake_case : Any = backbone.conv_stem.normalization.running_mean
_snake_case : int = backbone.conv_stem.normalization.running_var
for i in range(13 ):
_snake_case : int = i + 1
_snake_case : List[Any] = i * 2
_snake_case : List[Any] = backbone.layer[pt_index]
_snake_case : Tuple = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
_snake_case : Dict = pointer.convolution.weight
_snake_case : Union[str, Any] = pointer.normalization.bias
_snake_case : int = pointer.normalization.weight
_snake_case : Dict = pointer.normalization.running_mean
_snake_case : int = pointer.normalization.running_var
_snake_case : str = backbone.layer[pt_index + 1]
_snake_case : Optional[int] = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
_snake_case : Any = pointer.convolution.weight
_snake_case : Optional[Any] = pointer.normalization.bias
_snake_case : Any = pointer.normalization.weight
_snake_case : Tuple = pointer.normalization.running_mean
_snake_case : str = pointer.normalization.running_var
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Tuple = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
_snake_case : Any = model.classifier.weight
_snake_case : Union[str, Any] = model.classifier.bias
return tf_to_pt_map
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.' )
raise
# Load weights from TF model
_snake_case : Tuple = tf.train.list_variables(__lowerCAmelCase )
_snake_case : List[str] = {}
for name, shape in init_vars:
logger.info(F'''Loading TF weight {name} with shape {shape}''' )
_snake_case : Any = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
_snake_case : Tuple = array
# Build TF to PyTorch weights loading map
_snake_case : Dict = _build_tf_to_pytorch_map(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for name, pointer in tf_to_pt_map.items():
logger.info(F'''Importing {name}''' )
if name not in tf_weights:
logger.info(F'''{name} not in tf pre-trained weights, skipping''' )
continue
_snake_case : int = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise' )
_snake_case : int = np.transpose(__lowerCAmelCase , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('Transposing' )
if len(pointer.shape ) == 2: # copying into linear layer
_snake_case : int = array.squeeze().transpose()
else:
_snake_case : Tuple = np.transpose(__lowerCAmelCase , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' )
_snake_case : str = torch.from_numpy(__lowerCAmelCase )
tf_weights.pop(__lowerCAmelCase , __lowerCAmelCase )
tf_weights.pop(name + '/RMSProp' , __lowerCAmelCase )
tf_weights.pop(name + '/RMSProp_1' , __lowerCAmelCase )
tf_weights.pop(name + '/ExponentialMovingAverage' , __lowerCAmelCase )
logger.info(F'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' )
return model
def A__( __lowerCAmelCase , __lowerCAmelCase ):
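    # TensorFlow "SAME"-style padding: pad each spatial dimension so the output
    # size is ceil(input / stride), splitting the total padding as evenly as
    # possible between the two sides (the extra pixel goes to the bottom/right).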
    _snake_case , _snake_case : Any = features.shape[-2:]
    _snake_case , _snake_case : Optional[Any] = conv_layer.stride
    _snake_case , _snake_case : Optional[Any] = conv_layer.kernel_size
if in_height % stride_height == 0:
_snake_case : Any = max(kernel_height - stride_height , 0 )
else:
_snake_case : List[str] = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
_snake_case : List[str] = max(kernel_width - stride_width , 0 )
else:
_snake_case : Union[str, Any] = max(kernel_width - (in_width % stride_width) , 0 )
_snake_case : Tuple = pad_along_width // 2
_snake_case : Any = pad_along_width - pad_left
_snake_case : Union[str, Any] = pad_along_height // 2
_snake_case : Optional[int] = pad_along_height - pad_top
_snake_case : List[Any] = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(__lowerCAmelCase , __lowerCAmelCase , 'constant' , 0.0 )
class lowercase ( nn.Module ):
"""simple docstring"""
    def __init__( self : Optional[Any] , lowerCamelCase_ : MobileNetV1Config , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[bool] = True , lowerCamelCase_ : Optional[bool or str] = True , ):
'''simple docstring'''
super().__init__()
_snake_case : str = config
if in_channels % groups != 0:
raise ValueError(f'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(f'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
_snake_case : List[Any] = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
        _snake_case : str = nn.Conv2d(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , kernel_size=lowerCamelCase_ , stride=lowerCamelCase_ , padding=lowerCamelCase_ , groups=lowerCamelCase_ , bias=lowerCamelCase_ , padding_mode='zeros' , )
if use_normalization:
            _snake_case : List[Any] = nn.BatchNorm2d(
num_features=lowerCamelCase_ , eps=config.layer_norm_eps , momentum=0.9997 , affine=lowerCamelCase_ , track_running_stats=lowerCamelCase_ , )
else:
_snake_case : Any = None
if use_activation:
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
                _snake_case : List[Any] = ACT2FN[use_activation]
elif isinstance(config.hidden_act , lowerCamelCase_ ):
                _snake_case : List[str] = ACT2FN[config.hidden_act]
else:
_snake_case : int = config.hidden_act
else:
_snake_case : Optional[Any] = None
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : torch.Tensor ):
'''simple docstring'''
if self.config.tf_padding:
_snake_case : Dict = apply_tf_padding(lowerCamelCase_ , self.convolution )
_snake_case : List[str] = self.convolution(lowerCamelCase_ )
if self.normalization is not None:
_snake_case : Optional[int] = self.normalization(lowerCamelCase_ )
if self.activation is not None:
_snake_case : List[str] = self.activation(lowerCamelCase_ )
return features
class lowercase ( a_ ):
"""simple docstring"""
    _UpperCamelCase : str = MobileNetV1Config
    _UpperCamelCase : Union[str, Any] = load_tf_weights_in_mobilenet_v1
_UpperCamelCase : int = "mobilenet_v1"
_UpperCamelCase : str = "pixel_values"
_UpperCamelCase : Optional[Any] = False
    def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Union[nn.Linear, nn.Conv2d] ):
'''simple docstring'''
        if isinstance(lowerCamelCase_ , (nn.Linear, nn.Conv2d) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
        elif isinstance(lowerCamelCase_ , nn.BatchNorm2d ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
lowercase_ : Optional[Any] = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
lowercase_ : Optional[int] = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , a_ , )
class lowercase ( a_ ):
"""simple docstring"""
    def __init__( self : str , lowerCamelCase_ : MobileNetV1Config , lowerCamelCase_ : bool = True ):
'''simple docstring'''
super().__init__(lowerCamelCase_ )
_snake_case : int = config
_snake_case : Union[str, Any] = 32
_snake_case : Any = max(int(depth * config.depth_multiplier ) , config.min_depth )
        _snake_case : List[str] = MobileNetV1ConvLayer(
lowerCamelCase_ , in_channels=config.num_channels , out_channels=lowerCamelCase_ , kernel_size=3 , stride=2 , )
_snake_case : Any = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
_snake_case : Tuple = nn.ModuleList()
for i in range(13 ):
_snake_case : List[Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
_snake_case : List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
                MobileNetV1ConvLayer(
lowerCamelCase_ , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , kernel_size=3 , stride=strides[i] , groups=lowerCamelCase_ , ) )
self.layer.append(
                MobileNetV1ConvLayer(
lowerCamelCase_ , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , kernel_size=1 , ) )
        _snake_case : Optional[int] = nn.AdaptiveAvgPool2d((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __UpperCAmelCase ( self : str , lowerCamelCase_ : Tuple ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[bool] = None , ):
'''simple docstring'''
_snake_case : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
_snake_case : Optional[int] = self.conv_stem(lowerCamelCase_ )
_snake_case : Dict = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
_snake_case : Optional[int] = layer_module(lowerCamelCase_ )
if output_hidden_states:
_snake_case : Dict = all_hidden_states + (hidden_states,)
_snake_case : List[Any] = hidden_states
if self.pooler is not None:
_snake_case : Union[str, Any] = torch.flatten(self.pooler(lowerCamelCase_ ) , start_dim=1 )
else:
_snake_case : Union[str, Any] = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase_ , pooler_output=lowerCamelCase_ , hidden_states=lowerCamelCase_ , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , a_ , )
class lowercase ( a_ ):
"""simple docstring"""
    def __init__( self : List[Any] , lowerCamelCase_ : MobileNetV1Config ):
'''simple docstring'''
super().__init__(lowerCamelCase_ )
_snake_case : int = config.num_labels
        _snake_case : Any = MobileNetV1Model(lowerCamelCase_ )
        _snake_case : Any = self.mobilenet_v1.layer[-1].convolution.out_channels
# Classifier head
_snake_case : Union[str, Any] = nn.Dropout(config.classifier_dropout_prob , inplace=lowerCamelCase_ )
_snake_case : Tuple = nn.Linear(lowerCamelCase_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[bool] = None , ):
'''simple docstring'''
_snake_case : str = return_dict if return_dict is not None else self.config.use_return_dict
        _snake_case : int = self.mobilenet_v1(lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ )
_snake_case : List[Any] = outputs.pooler_output if return_dict else outputs[1]
_snake_case : Union[str, Any] = self.classifier(self.dropout(lowerCamelCase_ ) )
_snake_case : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case : Optional[int] = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case : List[Any] = 'single_label_classification'
else:
_snake_case : Tuple = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case : Any = MSELoss()
if self.num_labels == 1:
_snake_case : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case : int = loss_fct(lowerCamelCase_ , lowerCamelCase_ )
elif self.config.problem_type == "single_label_classification":
_snake_case : Optional[Any] = CrossEntropyLoss()
_snake_case : List[str] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case : Union[str, Any] = BCEWithLogitsLoss()
_snake_case : List[str] = loss_fct(lowerCamelCase_ , lowerCamelCase_ )
if not return_dict:
_snake_case : Dict = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=lowerCamelCase_ , logits=lowerCamelCase_ , hidden_states=outputs.hidden_states , )
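# Sketch of typical inference with the image-classification head above. The
# identifiers in this corpus are machine-transformed; the upstream library exposes
# these models as MobileNetV1Model / MobileNetV1ForImageClassification. The
# checkpoint name is taken from the docstring constants near the top of the file.
from PIL import Image
import requests
import torch
from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])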
| 712 |
from __future__ import annotations
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Dict = list(range(len(__lowerCAmelCase ) ) )
_snake_case : Optional[int] = [v / w for v, w in zip(__lowerCAmelCase , __lowerCAmelCase )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
_snake_case : float = 0
_snake_case : list[float] = [0] * len(__lowerCAmelCase )
for i in index:
if weight[i] <= capacity:
_snake_case : List[Any] = 1
max_value += value[i]
capacity -= weight[i]
else:
_snake_case : Any = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
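# A readable reference version of the greedy routine above (identifiers in this
# snippet are machine-transformed; the names below are my own): sort items by
# value/weight ratio, take whole items while they fit, then a fraction of the
# first item that does not.
def fractional_knapsack(value, weight, capacity):
    index = sorted(range(len(value)), key=lambda i: value[i] / weight[i], reverse=True)
    max_value, fractions = 0.0, [0.0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1.0
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions

assert fractional_knapsack([1, 3, 5, 7, 9], [0.9, 0.7, 0.5, 0.3, 0.1], 5)[0] == 25.0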
| 652 | 0 |
import math
import random
def A__( __lowerCAmelCase , __lowerCAmelCase = False ):
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
lowercase_ : Union[str, Any] = 0.02
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[Any] = float(2 * (random.randint(1 , 1_00 )) - 1 )
for _ in range(__lowerCAmelCase ):
# Forward propagation
_snake_case : Any = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
_snake_case : Dict = (expected / 1_00) - layer_a
# Error delta
_snake_case : Any = layer_1_error * sigmoid_function(__lowerCAmelCase , __lowerCAmelCase )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 1_00
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_ : Tuple = int(input('''Expected value: '''))
lowercase_ : List[str] = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
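# Non-interactive usage note (mine, not part of the original file): with enough
# propagation steps the single learned weight drives sigmoid(INITIAL_VALUE * weight)
# toward expected / 100, so a call like forward_propagation(32, 450_000) should
# return a value close to 32.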
| 713 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ : Any = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[Any] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : List[str] = logging.get_logger(__name__)
lowercase_ : Tuple = {
'''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = "git_vision_model"
def __init__( self : Optional[int] , lowerCamelCase_ : List[str]=7_68 , lowerCamelCase_ : str=30_72 , lowerCamelCase_ : Dict=12 , lowerCamelCase_ : Optional[int]=12 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : Optional[Any]=2_24 , lowerCamelCase_ : Dict=16 , lowerCamelCase_ : str="quick_gelu" , lowerCamelCase_ : List[Any]=1e-5 , lowerCamelCase_ : Optional[Any]=0.0 , lowerCamelCase_ : Tuple=0.02 , **lowerCamelCase_ : Optional[Any] , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
_snake_case : List[Any] = hidden_size
_snake_case : int = intermediate_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : List[Any] = num_attention_heads
_snake_case : Union[str, Any] = num_channels
_snake_case : str = patch_size
_snake_case : Optional[int] = image_size
_snake_case : List[Any] = initializer_range
_snake_case : Tuple = attention_dropout
_snake_case : str = layer_norm_eps
_snake_case : int = hidden_act
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , lowerCamelCase_ : Union[str, os.PathLike] , **lowerCamelCase_ : List[Any] ):
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase_ )
        _snake_case , _snake_case : List[Any] = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('model_type' ) == "git":
_snake_case : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ )
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : List[str] = "git"
def __init__( self : Dict , lowerCamelCase_ : Dict=None , lowerCamelCase_ : str=3_05_22 , lowerCamelCase_ : str=7_68 , lowerCamelCase_ : Dict=6 , lowerCamelCase_ : Optional[Any]=12 , lowerCamelCase_ : List[str]=30_72 , lowerCamelCase_ : str="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : int=10_24 , lowerCamelCase_ : str=0.02 , lowerCamelCase_ : Tuple=1e-12 , lowerCamelCase_ : Union[str, Any]=0 , lowerCamelCase_ : Dict="absolute" , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Union[str, Any]=False , lowerCamelCase_ : str=1_01 , lowerCamelCase_ : List[Any]=1_02 , lowerCamelCase_ : List[str]=None , **lowerCamelCase_ : Optional[Any] , ):
'''simple docstring'''
super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
if vision_config is None:
_snake_case : Dict = {}
logger.info('vision_config is None. initializing the GitVisionConfig with default values.' )
_snake_case : Optional[Any] = GitVisionConfig(**lowerCamelCase_ )
_snake_case : Tuple = vocab_size
_snake_case : int = hidden_size
_snake_case : Any = num_hidden_layers
_snake_case : Any = num_attention_heads
_snake_case : Any = hidden_act
_snake_case : Tuple = intermediate_size
_snake_case : str = hidden_dropout_prob
_snake_case : int = attention_probs_dropout_prob
_snake_case : int = max_position_embeddings
_snake_case : List[str] = initializer_range
_snake_case : List[Any] = layer_norm_eps
_snake_case : List[Any] = position_embedding_type
_snake_case : Any = use_cache
_snake_case : List[Any] = tie_word_embeddings
_snake_case : str = num_image_with_embedding
_snake_case : Optional[int] = bos_token_id
_snake_case : List[Any] = eos_token_id
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = copy.deepcopy(self.__dict__ )
_snake_case : List[str] = self.vision_config.to_dict()
_snake_case : Optional[int] = self.__class__.model_type
return output
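# Usage sketch (the upstream library exposes these classes as GitConfig and
# GitVisionConfig; the class names in this corpus are machine-transformed):
from transformers import GitConfig, GitVisionConfig

config = GitConfig(vision_config=GitVisionConfig(image_size=384).to_dict())
print(config.vision_config.image_size)  # 384
print(config.model_type)                # "git"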
| 714 |
import math
def A__( __lowerCAmelCase ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A__( __lowerCAmelCase = 1_00_01 ):
try:
_snake_case : int = int(__lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError('Parameter nth must be int or castable to int.' ) from None
if nth <= 0:
raise ValueError('Parameter nth must be greater than or equal to one.' )
_snake_case : list[int] = []
_snake_case : List[Any] = 2
while len(__lowerCAmelCase ) < nth:
if is_prime(__lowerCAmelCase ):
primes.append(__lowerCAmelCase )
num += 1
else:
num += 1
return primes[len(__lowerCAmelCase ) - 1]
if __name__ == "__main__":
print(F'''{solution() = }''')
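# Readable sketch of the 6k +/- 1 trial-division test the solution above relies on
# (names are mine): after handling 2 and 3, every possible prime divisor can be
# written as 6k - 1 or 6k + 1, so the loop only probes 5, 7, 11, 13, ...
import math

def is_prime_6k(number: int) -> bool:
    if number in (2, 3):
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

assert [n for n in range(2, 30) if is_prime_6k(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]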
| 652 | 0 |
def A__( __lowerCAmelCase ):
_snake_case : list[list[float]] = []
for data in source_data:
for i, el in enumerate(__lowerCAmelCase ):
if len(__lowerCAmelCase ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(__lowerCAmelCase ) )
return data_lists
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : list[list[float]] = []
for dlist, weight in zip(__lowerCAmelCase , __lowerCAmelCase ):
_snake_case : str = min(__lowerCAmelCase )
_snake_case : Optional[Any] = max(__lowerCAmelCase )
_snake_case : list[float] = []
        # for weight 0, the score is 1 minus the normalized value (lower raw value is better)
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
        # any weight other than 0 or 1 is invalid
else:
_snake_case : int = F'''Invalid weight of {weight:f} provided'''
raise ValueError(__lowerCAmelCase )
score_lists.append(__lowerCAmelCase )
return score_lists
def A__( __lowerCAmelCase ):
_snake_case : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(__lowerCAmelCase ):
            final_scores[j] = final_scores[j] + ele
return final_scores
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Dict = get_data(__lowerCAmelCase )
_snake_case : Dict = calculate_each_score(__lowerCAmelCase , __lowerCAmelCase )
_snake_case : Optional[Any] = generate_final_scores(__lowerCAmelCase )
# append scores to source data
    for i, ele in enumerate(final_scores ):
        source_data[i].append(ele )
return source_data
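# Worked example (made-up data, mine): rows are [price, rating, year] and weights
# [0, 0, 1] mean "lower price better, lower rating better, higher year better".
# Each column is min-max normalized, the per-column scores are summed, and the
# driver appends the total to each source row:
#   [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
#   -> [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, ~1.33]]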
| 715 |
import torch
from transformers import AutoModel
class lowercase ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase_ : Dict="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
        super().__init__()
_snake_case : Optional[Any] = AutoModel.from_pretrained(lowerCamelCase_ , return_dict=lowerCamelCase_ )
_snake_case : str = torch.nn.CosineSimilarity(3 , 1e-08 )
_snake_case : str = torch.nn.Softmax(dim=1 )
def __UpperCAmelCase ( self : int , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
return self.bert(**lowerCamelCase_ ).last_hidden_state
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
return token_embeddings.sum(2 , keepdim=lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any]=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(lowerCamelCase_ , lowerCamelCase_ ) )
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = W_supports['sizes'].tolist()
_snake_case : int = W_supports['start_token_id'].item()
_snake_case : List[str] = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_snake_case : Optional[int] = self.BERT(**lowerCamelCase_ )
_snake_case : Optional[Any] = self.BERT(**lowerCamelCase_ )
_snake_case : Optional[int] = None
_snake_case : Optional[int] = None
_snake_case : List[str] = W_supports['input_ids'] == start_token_id
_snake_case : Union[str, Any] = W_supports['input_ids'] == end_token_id
for i, size in enumerate(lowerCamelCase_ ):
if i == 0:
_snake_case : str = 0
else:
_snake_case : Union[str, Any] = support_sizes[i - 1]
_snake_case : Tuple = S[s : s + size][start_token_masks[s : s + size]]
_snake_case : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
_snake_case : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_snake_case : Union[str, Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_snake_case : Optional[Any] = torch.vstack((p_starts, p_start) )
_snake_case : List[str] = torch.vstack((p_ends, p_end) )
else:
_snake_case : Union[str, Any] = p_start
_snake_case : Any = p_end
return p_starts, p_ends
| 652 | 0 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
UpperCamelCase_ : Optional[int] = object()
# For specifying empty leaf dict `{}`
UpperCamelCase_ : List[Any] = object()
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[Any] = tuple((re.compile(x + '$' ) for x in qs) )
for i in range(len(__lowerCAmelCase ) - len(__lowerCAmelCase ) + 1 ):
_snake_case : Tuple = [x.match(__lowerCAmelCase ) for x, y in zip(__lowerCAmelCase , ks[i:] )]
if matches and all(__lowerCAmelCase ):
return True
return False
def A__( __lowerCAmelCase ):
def replace(__lowerCAmelCase , __lowerCAmelCase ):
for rule, replacement in rules:
if _match(__lowerCAmelCase , __lowerCAmelCase ):
return replacement
return val
return replace
def A__( ):
return [
# embeddings
(("transformer", "wpe", "embedding"), P('mp' , __lowerCAmelCase )),
(("transformer", "wte", "embedding"), P('mp' , __lowerCAmelCase )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCAmelCase , 'mp' )),
(("attention", "out_proj", "kernel"), P('mp' , __lowerCAmelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__lowerCAmelCase , 'mp' )),
(("mlp", "c_fc", "bias"), P('mp' )),
(("mlp", "c_proj", "kernel"), P('mp' , __lowerCAmelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def A__( __lowerCAmelCase ):
_snake_case : Optional[Any] = _get_partition_rules()
_snake_case : Optional[Any] = _replacement_rules(__lowerCAmelCase )
_snake_case : str = {k: _unmatched for k in flatten_dict(__lowerCAmelCase )}
_snake_case : str = {k: replace(__lowerCAmelCase , __lowerCAmelCase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__lowerCAmelCase ) )
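# Quick illustration of the regex-window matching used by the `_match` helper
# above (pure Python, names mine): a rule is a tuple of regexes matched against a
# contiguous window of the flattened parameter key, so ("mlp", "c_fc", "kernel")
# matches the tail of ("transformer", "h", "0", "mlp", "c_fc", "kernel").
import re

rule = tuple(re.compile(x + "$") for x in ("mlp", "c_fc", "kernel"))
key = ("transformer", "h", "0", "mlp", "c_fc", "kernel")
assert any(
    all(r.match(k) for r, k in zip(rule, key[i:]))
    for i in range(len(key) - len(rule) + 1)
)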
| 716 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase ( a_ , a_ , a_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Any , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : float , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : List[str] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Union[str, Any] = False
_snake_case : Tuple = nn.Dropout(p=lowerCamelCase_ )
        _snake_case : Union[str, Any] = T5Config(
vocab_size=lowerCamelCase_ , d_model=lowerCamelCase_ , num_heads=lowerCamelCase_ , d_kv=lowerCamelCase_ , d_ff=lowerCamelCase_ , dropout_rate=lowerCamelCase_ , feed_forward_proj=lowerCamelCase_ , is_decoder=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , )
_snake_case : Union[str, Any] = nn.ModuleList()
for lyr_num in range(lowerCamelCase_ ):
            _snake_case : Any = T5Block(lowerCamelCase_ )
self.encoders.append(lowerCamelCase_ )
        _snake_case : Tuple = T5LayerNorm(lowerCamelCase_ )
_snake_case : List[str] = nn.Dropout(p=lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple ):
'''simple docstring'''
_snake_case : Any = self.token_embedder(lowerCamelCase_ )
_snake_case : List[Any] = encoder_input_tokens.shape[1]
_snake_case : Any = torch.arange(lowerCamelCase_ , device=encoder_input_tokens.device )
x += self.position_encoding(lowerCamelCase_ )
_snake_case : Tuple = self.dropout_pre(lowerCamelCase_ )
        # invert the attention mask
_snake_case : Dict = encoder_input_tokens.size()
_snake_case : Optional[int] = self.get_extended_attention_mask(lowerCamelCase_ , lowerCamelCase_ )
for lyr in self.encoders:
_snake_case : str = lyr(lowerCamelCase_ , lowerCamelCase_ )[0]
_snake_case : Any = self.layer_norm(lowerCamelCase_ )
return self.dropout_post(lowerCamelCase_ ), encoder_inputs_mask
| 652 | 0 |
'''simple docstring'''
import qiskit
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[Any] = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_snake_case : List[str] = qiskit.QuantumCircuit(__lowerCAmelCase , __lowerCAmelCase )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
_snake_case : Tuple = qiskit.execute(__lowerCAmelCase , __lowerCAmelCase , shots=10_00 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(__lowerCAmelCase )
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
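# With no gates applied before measurement the qubit stays in |0>, so the
# histogram is deterministic: {'0': 1000} for the 1000 shots configured above.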
| 717 |
def A__( __lowerCAmelCase ):
assert column_title.isupper()
_snake_case : List[Any] = 0
_snake_case : List[str] = len(__lowerCAmelCase ) - 1
_snake_case : Dict = 0
while index >= 0:
_snake_case : List[str] = (ord(column_title[index] ) - 64) * pow(26 , __lowerCAmelCase )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
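# Equivalent left-to-right sketch (names mine): treat the title as a base-26
# numeral with digits A=1 ... Z=26, accumulating as you scan the string instead
# of weighting powers from the right.
def excel_column_to_number(title: str) -> int:
    result = 0
    for char in title:
        result = result * 26 + (ord(char) - ord("A") + 1)
    return result

assert excel_column_to_number("A") == 1
assert excel_column_to_number("AB") == 28
assert excel_column_to_number("ZZ") == 702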
| 652 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase_ : List[Any] = {'''tokenization_bertweet''': ['''BertweetTokenizer''']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
lowercase_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 718 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
lowercase_ : List[str] = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
lowercase_ : Optional[int] = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
lowercase_ : Any = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
_snake_case : str = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
        _snake_case : int = [[refs[i] for refs in references] for i in range(references_per_prediction )]
_snake_case : Optional[int] = TER(
normalized=lowerCamelCase_ , no_punct=lowerCamelCase_ , asian_support=lowerCamelCase_ , case_sensitive=lowerCamelCase_ , )
_snake_case : Optional[Any] = sb_ter.corpus_score(lowerCamelCase_ , lowerCamelCase_ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 652 | 0 |
def A__( __lowerCAmelCase ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('only integers accepted as input' )
else:
_snake_case : Any = str(abs(__lowerCAmelCase ) )
        _snake_case : List[str] = [list(num_string ) for char in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
    return max(
        int(''.join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
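# Readable sketch of the routine above (names mine): drop each digit in turn and
# keep the largest remaining number.
def max_after_one_removal(number: int) -> int:
    digits = str(abs(number))
    return max(int(digits[:i] + digits[i + 1:]) for i in range(len(digits)))

assert max_after_one_removal(132) == 32
assert max_after_one_removal(-951) == 95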
| 719 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowercase_ : Optional[int] = object()
# For specifying empty leaf dict `{}`
lowercase_ : List[Any] = object()
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[Any] = tuple((re.compile(x + '$' ) for x in qs) )
for i in range(len(__lowerCAmelCase ) - len(__lowerCAmelCase ) + 1 ):
_snake_case : Tuple = [x.match(__lowerCAmelCase ) for x, y in zip(__lowerCAmelCase , ks[i:] )]
if matches and all(__lowerCAmelCase ):
return True
return False
def A__( __lowerCAmelCase ):
def replace(__lowerCAmelCase , __lowerCAmelCase ):
for rule, replacement in rules:
if _match(__lowerCAmelCase , __lowerCAmelCase ):
return replacement
return val
return replace
def A__( ):
return [
# embeddings
(("transformer", "wpe", "embedding"), P('mp' , __lowerCAmelCase )),
(("transformer", "wte", "embedding"), P('mp' , __lowerCAmelCase )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCAmelCase , 'mp' )),
(("attention", "out_proj", "kernel"), P('mp' , __lowerCAmelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__lowerCAmelCase , 'mp' )),
(("mlp", "c_fc", "bias"), P('mp' )),
(("mlp", "c_proj", "kernel"), P('mp' , __lowerCAmelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def A__( __lowerCAmelCase ):
_snake_case : Optional[Any] = _get_partition_rules()
_snake_case : Optional[Any] = _replacement_rules(__lowerCAmelCase )
_snake_case : str = {k: _unmatched for k in flatten_dict(__lowerCAmelCase )}
_snake_case : str = {k: replace(__lowerCAmelCase , __lowerCAmelCase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__lowerCAmelCase ) )
| 652 | 0 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowercase :
"""simple docstring"""
@staticmethod
def __UpperCAmelCase ( *lowerCamelCase_ : Tuple , **lowerCamelCase_ : int ):
'''simple docstring'''
pass
def A__( __lowerCAmelCase ):
    _snake_case : int = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def A__( __lowerCAmelCase ):
_snake_case : Tuple = np.array(__lowerCAmelCase )
_snake_case : str = npimg.shape
return {"hash": hashimage(__lowerCAmelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : Any = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
_UpperCamelCase : Union[str, Any] = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
_snake_case : Optional[int] = MaskGenerationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
pass
@slow
@require_torch
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : int = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
_snake_case : Optional[Any] = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=2_56 )
# Shortening by hashing
_snake_case : int = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase_ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (4_80, 6_40)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (4_80, 6_40)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_80, 6_40)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_80, 6_40)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_80, 6_40)}, 'scores': 1.0053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (4_80, 6_40)}, 'scores': 0.9967},
{'mask': {'hash': '453c7844bd', 'shape': (4_80, 6_40)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (4_80, 6_40)}, 'scores': 0.9909},
{'mask': {'hash': '64033ddc3f', 'shape': (4_80, 6_40)}, 'scores': 0.9879},
{'mask': {'hash': '801064ff79', 'shape': (4_80, 6_40)}, 'scores': 0.9834},
{'mask': {'hash': '6172f276ef', 'shape': (4_80, 6_40)}, 'scores': 0.9716},
{'mask': {'hash': 'b49e60e084', 'shape': (4_80, 6_40)}, 'scores': 0.9612},
{'mask': {'hash': 'a811e775fd', 'shape': (4_80, 6_40)}, 'scores': 0.9599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (4_80, 6_40)}, 'scores': 0.9552},
{'mask': {'hash': '9d8257e080', 'shape': (4_80, 6_40)}, 'scores': 0.9532},
{'mask': {'hash': '32de6454a8', 'shape': (4_80, 6_40)}, 'scores': 0.9516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (4_80, 6_40)}, 'scores': 0.9499},
{'mask': {'hash': '3c6db475fb', 'shape': (4_80, 6_40)}, 'scores': 0.9483},
{'mask': {'hash': 'c290813fb9', 'shape': (4_80, 6_40)}, 'scores': 0.9464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (4_80, 6_40)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (4_80, 6_40)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (4_80, 6_40)}, 'scores': 0.9408},
{'mask': {'hash': 'efb6cab859', 'shape': (4_80, 6_40)}, 'scores': 0.9335},
{'mask': {'hash': '1ff2eafb30', 'shape': (4_80, 6_40)}, 'scores': 0.9326},
{'mask': {'hash': '788b798e24', 'shape': (4_80, 6_40)}, 'scores': 0.9262},
{'mask': {'hash': 'abea804f0e', 'shape': (4_80, 6_40)}, 'scores': 0.8999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (4_80, 6_40)}, 'scores': 0.8986},
{'mask': {'hash': 'cd24047c8a', 'shape': (4_80, 6_40)}, 'scores': 0.8984},
{'mask': {'hash': '6943e6bcbd', 'shape': (4_80, 6_40)}, 'scores': 0.8873},
{'mask': {'hash': 'b5f47c9191', 'shape': (4_80, 6_40)}, 'scores': 0.8871}
] , )
# fmt: on
@require_torch
@slow
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Tuple = 'facebook/sam-vit-huge'
_snake_case : int = pipeline('mask-generation' , model=lowerCamelCase_ )
_snake_case : Tuple = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=2_56 )
# Shortening by hashing
_snake_case : str = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase_ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (4_80, 6_40)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (4_80, 6_40)}, 'scores': 1.0210},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_80, 6_40)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_80, 6_40)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_80, 6_40)}, 'scores': 1.0053},
] , )
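# Usage sketch of the pipeline exercised above (a heavyweight checkpoint, so shown
# as comments; argument names follow the calls made in the tests):
#   from transformers import pipeline
#   generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
#   outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
#   masks, scores = outputs["masks"], outputs["scores"]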
| 720 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
lowercase_ : Any = logging.getLogger(__name__)
def A__( __lowerCAmelCase , __lowerCAmelCase ):
# save results
if os.path.exists(__lowerCAmelCase ):
if os.path.exists(os.path.join(__lowerCAmelCase , 'config.json' ) ) and os.path.isfile(
os.path.join(__lowerCAmelCase , 'config.json' ) ):
os.remove(os.path.join(__lowerCAmelCase , 'config.json' ) )
if os.path.exists(os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) ):
os.remove(os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) )
else:
os.makedirs(__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
def A__( __lowerCAmelCase , __lowerCAmelCase=False ):
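    # Shannon entropy of a distribution along the last axis; with the second flag
    # set, the inputs are squared first. The zero guard below keeps 0 * log(0)
    # terms from producing NaN.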
_snake_case : int = 2
if unlogit:
        _snake_case : Dict = torch.pow(p , exponent )
    _snake_case : Optional[int] = p * torch.log(p )
    plogp[p == 0] = 0
return -plogp.sum(dim=-1 )
def A__( __lowerCAmelCase ):
logger.info('lv, h >\t' + '\t'.join(F'''{x + 1}''' for x in range(len(__lowerCAmelCase ) ) ) )
for row in range(len(__lowerCAmelCase ) ):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=False ):
_snake_case , _snake_case : List[str] = model.config.num_hidden_layers, model.config.num_attention_heads
_snake_case : Optional[int] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
_snake_case : Union[str, Any] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
if head_mask is None:
_snake_case : List[str] = torch.ones(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=__lowerCAmelCase )
    # If attention heads have actually been pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
_snake_case : List[str] = None
_snake_case : str = 0.0
_snake_case : List[str] = 0.0
for step, inputs in enumerate(tqdm(__lowerCAmelCase , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
_snake_case : Dict = tuple(t.to(args.device ) for t in inputs )
((_snake_case) , ) : int = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_snake_case : List[str] = model(__lowerCAmelCase , labels=__lowerCAmelCase , head_mask=__lowerCAmelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_snake_case , _snake_case , _snake_case : Dict = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__lowerCAmelCase ):
_snake_case : int = entropy(attn.detach() , __lowerCAmelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__lowerCAmelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_snake_case : List[str] = 2
_snake_case : Any = torch.pow(torch.pow(__lowerCAmelCase , __lowerCAmelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
_snake_case : Union[str, Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(__lowerCAmelCase )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(__lowerCAmelCase )
logger.info('Head ranked by importance scores' )
_snake_case : Tuple = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    _snake_case : Union[str, Any] = head_ranks.view_as(head_importance )
print_ad_tensor(__lowerCAmelCase )
return attn_entropy, head_importance, total_loss
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case , _snake_case , _snake_case : int = compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase )
_snake_case : List[str] = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , __lowerCAmelCase , original_score * args.masking_threshold )
_snake_case : Optional[Any] = torch.ones_like(__lowerCAmelCase )
_snake_case : Tuple = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_snake_case : Dict = original_score
while current_score >= original_score * args.masking_threshold:
_snake_case : List[str] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_snake_case : Optional[Any] = float('Inf' )
_snake_case : Optional[int] = head_importance.view(-1 ).sort()[1]
if len(__lowerCAmelCase ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
_snake_case : List[Any] = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
_snake_case : Tuple = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
_snake_case : int = new_head_mask.view_as(__lowerCAmelCase )
_snake_case : Optional[Any] = new_head_mask.clone().detach()
print_ad_tensor(__lowerCAmelCase )
# Compute metric and head importance again
_snake_case , _snake_case , _snake_case : List[str] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_snake_case : Union[str, Any] = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , __lowerCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('Final head mask' )
print_ad_tensor(__lowerCAmelCase )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[Any] = datetime.now()
_snake_case , _snake_case , _snake_case : List[Any] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_snake_case : str = 1 / loss
_snake_case : Optional[int] = datetime.now() - before_time
_snake_case : Optional[int] = sum(p.numel() for p in model.parameters() )
_snake_case : Optional[Any] = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowerCAmelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[int] = [
v,
]
assert sum(len(__lowerCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__lowerCAmelCase )
_snake_case : Optional[Any] = sum(p.numel() for p in model.parameters() )
_snake_case : List[str] = datetime.now()
_snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase , actually_pruned=__lowerCAmelCase , )
_snake_case : Dict = 1 / loss
_snake_case : str = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , __lowerCAmelCase , __lowerCAmelCase , pruned_num_params / original_num_params * 1_00 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , __lowerCAmelCase , __lowerCAmelCase )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_00 )
save_model(__lowerCAmelCase , args.output_dir )
def A__( ):
_snake_case : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=__lowerCAmelCase , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=__lowerCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=__lowerCAmelCase , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
        '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance scores by layer' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
        '--try_masking' , action='store_true' , help='Whether to try to mask heads until a threshold of accuracy.' )
parser.add_argument(
        '--masking_threshold' , default=0.9 , type=__lowerCAmelCase , help='masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
        '--masking_amount' , default=0.1 , type=__lowerCAmelCase , help='Fraction of heads to mask at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=__lowerCAmelCase , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_28 , type=__lowerCAmelCase , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=__lowerCAmelCase , help='Batch size.' )
parser.add_argument('--seed' , type=__lowerCAmelCase , default=42 )
parser.add_argument('--local_rank' , type=__lowerCAmelCase , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
_snake_case : List[str] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCAmelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_snake_case : Optional[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
_snake_case : Any = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_snake_case : Union[str, Any] = torch.device('cuda' , args.local_rank )
_snake_case : Dict = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_snake_case : Any = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_snake_case : List[str] = nn.parallel.DistributedDataParallel(
__lowerCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCAmelCase )
elif args.n_gpu > 1:
_snake_case : int = nn.DataParallel(__lowerCAmelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__lowerCAmelCase )
torch.save(__lowerCAmelCase , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , __lowerCAmelCase )
# Prepare dataset
_snake_case : str = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_snake_case : Dict = (torch.from_numpy(__lowerCAmelCase ),)
_snake_case : List[Any] = TensorDataset(*__lowerCAmelCase )
_snake_case : List[str] = RandomSampler(__lowerCAmelCase )
_snake_case : List[str] = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_snake_case : Union[str, Any] = mask_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
prune_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
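# Invocation sketch (hypothetical script name, paths and model identifier -- none of
# these are taken from this file; only the flags below are defined by the parser above):
#   python run_gpt2_bertology.py --data_dir ./data --model_name_or_path gpt2 \
#       --output_dir ./out --try_masking --masking_threshold 0.9 --masking_amount 0.1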
| 652 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
lowercase_ : Union[str, Any] = random.Random()
def A__( __lowerCAmelCase , __lowerCAmelCase=1.0 , __lowerCAmelCase=None , __lowerCAmelCase=None ):
if rng is None:
_snake_case : str = global_rng
_snake_case : Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
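# Illustrative call (assuming the positional parameters are shape, scale, rng and name,
# as in the usual floats_list test helper): floats_list((2, 3)) yields a 2x3 nested list
# of floats drawn from the module-level rng, each multiplied by scale (default 1.0).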
@require_torch
@require_torchaudio
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any=7 , lowerCamelCase_ : List[Any]=4_00 , lowerCamelCase_ : List[str]=20_00 , lowerCamelCase_ : Union[str, Any]=10 , lowerCamelCase_ : Dict=1_60 , lowerCamelCase_ : int=8 , lowerCamelCase_ : Dict=0.0 , lowerCamelCase_ : List[str]=40_00 , lowerCamelCase_ : Optional[Any]=False , lowerCamelCase_ : Any=True , ):
'''simple docstring'''
_snake_case : Dict = parent
_snake_case : Optional[Any] = batch_size
_snake_case : int = min_seq_length
_snake_case : Tuple = max_seq_length
_snake_case : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_snake_case : List[str] = padding_value
_snake_case : Optional[Any] = sampling_rate
_snake_case : Optional[int] = return_attention_mask
_snake_case : Optional[int] = do_normalize
_snake_case : List[Any] = feature_size
_snake_case : int = chunk_length
_snake_case : Union[str, Any] = hop_length
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[Any]=False , lowerCamelCase_ : str=False ):
'''simple docstring'''
def _flatten(lowerCamelCase_ : Dict ):
return list(itertools.chain(*lowerCamelCase_ ) )
if equal_length:
_snake_case : str = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_snake_case : List[str] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_snake_case : Optional[Any] = [np.asarray(lowerCamelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( a_ , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : Any = WhisperFeatureExtractor if is_speech_available() else None
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : Optional[Any] = WhisperFeatureExtractionTester(self )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case : Any = feat_extract_first.save_pretrained(lowerCamelCase_ )[0]
check_json_file_has_correct_format(lowerCamelCase_ )
_snake_case : Tuple = self.feature_extraction_class.from_pretrained(lowerCamelCase_ )
_snake_case : Optional[Any] = feat_extract_first.to_dict()
_snake_case : List[Any] = feat_extract_second.to_dict()
_snake_case : List[Any] = feat_extract_first.mel_filters
_snake_case : List[str] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : int = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case : Any = os.path.join(lowerCamelCase_ , 'feat_extract.json' )
feat_extract_first.to_json_file(lowerCamelCase_ )
_snake_case : List[str] = self.feature_extraction_class.from_json_file(lowerCamelCase_ )
_snake_case : str = feat_extract_first.to_dict()
_snake_case : Union[str, Any] = feat_extract_second.to_dict()
_snake_case : str = feat_extract_first.mel_filters
_snake_case : str = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_snake_case : List[str] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_snake_case : Dict = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test feature size
_snake_case : Dict = feature_extractor(lowerCamelCase_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_snake_case : Tuple = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
_snake_case : str = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# Test batched
_snake_case : Any = feature_extractor(lowerCamelCase_ , return_tensors='np' ).input_features
_snake_case : List[Any] = feature_extractor(lowerCamelCase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_snake_case : Union[str, Any] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
_snake_case : Optional[int] = np.asarray(lowerCamelCase_ )
_snake_case : Union[str, Any] = feature_extractor(lowerCamelCase_ , return_tensors='np' ).input_features
_snake_case : str = feature_extractor(lowerCamelCase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# Test truncation required
_snake_case : Optional[Any] = [floats_list((1, x) )[0] for x in range(2_00 , (feature_extractor.n_samples + 5_00) , 2_00 )]
_snake_case : str = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
_snake_case : int = [x[: feature_extractor.n_samples] for x in speech_inputs]
_snake_case : Dict = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs_truncated]
_snake_case : str = feature_extractor(lowerCamelCase_ , return_tensors='np' ).input_features
_snake_case : Optional[Any] = feature_extractor(lowerCamelCase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
import torch
_snake_case : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case : Any = np.random.rand(1_00 , 32 ).astype(np.floataa )
_snake_case : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_snake_case : Optional[Any] = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_snake_case : Dict = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Any ):
'''simple docstring'''
_snake_case : Dict = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
_snake_case : Any = ds.sort('id' ).select(range(lowerCamelCase_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Tuple = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_snake_case : str = self._load_datasamples(1 )
_snake_case : Any = WhisperFeatureExtractor()
_snake_case : int = feature_extractor(lowerCamelCase_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 30_00) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowerCamelCase_ , atol=1e-4 ) )
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
_snake_case : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case : Any = self._load_datasamples(1 )[0]
_snake_case : Dict = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue
_snake_case : Dict = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase_ )[0]
self.assertTrue(np.all(np.mean(lowerCamelCase_ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase_ ) - 1 ) < 1e-3 ) )
| 721 |
def A__( __lowerCAmelCase ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('only integers accepted as input' )
else:
_snake_case : Any = str(abs(__lowerCAmelCase ) )
        _snake_case : List[str] = [list(num_string ) for char in range(len(num_string ) )]
    for index in range(len(num_string ) ):
        num_transpositions[index].pop(index )
    return max(
        int(''.join(list(transposition ) ) ) for transposition in num_transpositions )
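# Intended behavior sketch (illustrative value): the routine copies the digit string once
# per digit, drops a different digit from each copy, and returns the largest remaining
# number -- e.g. an input of 123 yields candidates 23, 13 and 12, hence 23.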
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 652 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : torch.FloatTensor
class lowercase ( a_ , a_ ):
"""simple docstring"""
@register_to_config
def __init__( self : int , lowerCamelCase_ : int = 32 , lowerCamelCase_ : int = 64 , lowerCamelCase_ : int = 20 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : str=77 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : str = "silu" , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[str] = "linear" , lowerCamelCase_ : Optional[str] = "prd" , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case : Dict = num_attention_heads
_snake_case : Optional[Any] = attention_head_dim
_snake_case : Tuple = num_attention_heads * attention_head_dim
_snake_case : Any = additional_embeddings
_snake_case : Union[str, Any] = time_embed_dim or inner_dim
_snake_case : Any = embedding_proj_dim or embedding_dim
_snake_case : int = clip_embed_dim or embedding_dim
_snake_case : Tuple = Timesteps(lowerCamelCase_ , lowerCamelCase_ , 0 )
_snake_case : int = TimestepEmbedding(lowerCamelCase_ , lowerCamelCase_ , out_dim=lowerCamelCase_ , act_fn=lowerCamelCase_ )
_snake_case : Any = nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
if embedding_proj_norm_type is None:
_snake_case : str = None
elif embedding_proj_norm_type == "layer":
_snake_case : List[Any] = nn.LayerNorm(lowerCamelCase_ )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
_snake_case : Any = nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
if encoder_hid_proj_type is None:
_snake_case : List[Any] = None
elif encoder_hid_proj_type == "linear":
_snake_case : str = nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
_snake_case : List[Any] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCamelCase_ ) )
if added_emb_type == "prd":
_snake_case : Any = nn.Parameter(torch.zeros(1 , 1 , lowerCamelCase_ ) )
elif added_emb_type is None:
_snake_case : Optional[Any] = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
_snake_case : Any = nn.ModuleList(
[
BasicTransformerBlock(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , dropout=lowerCamelCase_ , activation_fn='gelu' , attention_bias=lowerCamelCase_ , )
for d in range(lowerCamelCase_ )
] )
if norm_in_type == "layer":
_snake_case : Any = nn.LayerNorm(lowerCamelCase_ )
elif norm_in_type is None:
_snake_case : List[str] = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
_snake_case : Optional[int] = nn.LayerNorm(lowerCamelCase_ )
_snake_case : int = nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Optional[int] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
_snake_case : Dict = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , lowerCamelCase_ , persistent=lowerCamelCase_ )
_snake_case : Union[str, Any] = nn.Parameter(torch.zeros(1 , lowerCamelCase_ ) )
_snake_case : Tuple = nn.Parameter(torch.zeros(1 , lowerCamelCase_ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Any = {}
def fn_recursive_add_processors(lowerCamelCase_ : str , lowerCamelCase_ : torch.nn.Module , lowerCamelCase_ : Dict[str, AttentionProcessor] ):
if hasattr(lowerCamelCase_ , 'set_processor' ):
_snake_case : Dict = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , lowerCamelCase_ , lowerCamelCase_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return processors
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
_snake_case : Dict = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and len(lowerCamelCase_ ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(lowerCamelCase_ )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCamelCase_ : str , lowerCamelCase_ : torch.nn.Module , lowerCamelCase_ : List[Any] ):
if hasattr(lowerCamelCase_ , 'set_processor' ):
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
module.set_processor(lowerCamelCase_ )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , lowerCamelCase_ , lowerCamelCase_ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[torch.Tensor, float, int] , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[torch.BoolTensor] = None , lowerCamelCase_ : bool = True , ):
'''simple docstring'''
_snake_case : Any = hidden_states.shape[0]
_snake_case : Tuple = timestep
if not torch.is_tensor(lowerCamelCase_ ):
_snake_case : Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCamelCase_ ) and len(timesteps.shape ) == 0:
_snake_case : List[Any] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_snake_case : List[str] = timesteps * torch.ones(lowerCamelCase_ , dtype=timesteps.dtype , device=timesteps.device )
_snake_case : Any = self.time_proj(lowerCamelCase_ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_snake_case : List[str] = timesteps_projected.to(dtype=self.dtype )
_snake_case : int = self.time_embedding(lowerCamelCase_ )
if self.embedding_proj_norm is not None:
_snake_case : Dict = self.embedding_proj_norm(lowerCamelCase_ )
_snake_case : str = self.embedding_proj(lowerCamelCase_ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_snake_case : int = self.encoder_hidden_states_proj(lowerCamelCase_ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_snake_case : List[str] = self.proj_in(lowerCamelCase_ )
_snake_case : Union[str, Any] = self.positional_embedding.to(hidden_states.dtype )
_snake_case : Optional[int] = []
_snake_case : Tuple = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCamelCase_ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_snake_case : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_snake_case : Optional[int] = hidden_states[:, None, :]
_snake_case : Tuple = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_snake_case : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase_ , -1 , -1 )
additional_embeds.append(lowerCamelCase_ )
_snake_case : str = torch.cat(
lowerCamelCase_ , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_snake_case : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_snake_case : Dict = F.pad(
lowerCamelCase_ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_snake_case : Union[str, Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_snake_case : List[str] = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
_snake_case : str = F.pad(lowerCamelCase_ , (0, self.additional_embeddings) , value=0.0 )
_snake_case : List[str] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_snake_case : Dict = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_snake_case : Optional[Any] = self.norm_in(lowerCamelCase_ )
for block in self.transformer_blocks:
_snake_case : int = block(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
_snake_case : Dict = self.norm_out(lowerCamelCase_ )
if self.prd_embedding is not None:
_snake_case : Optional[int] = hidden_states[:, -1]
else:
_snake_case : str = hidden_states[:, additional_embeddings_len:]
_snake_case : List[str] = self.proj_to_clip_embeddings(lowerCamelCase_ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
_snake_case : Any = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
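# Shape sketch (inferred from the forward pass above, not an authoritative spec):
# hidden_states of shape (batch_size, embedding_dim) and proj_embedding of shape
# (batch_size, embedding_dim), together with a scalar or (batch_size,) timestep, yield a
# predicted_image_embedding of shape (batch_size, clip_embed_dim), which defaults to
# embedding_dim when clip_embed_dim is not set.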
| 700 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowercase_ : Tuple = logging.getLogger(__name__)
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Dict=-1 ):
'''simple docstring'''
_snake_case : str = label_idx
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : Union[str, Any] = mode.value
_snake_case : Optional[int] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Dict = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
_snake_case : List[Any] = []
_snake_case : int = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
_snake_case : Optional[Any] = []
_snake_case : Union[str, Any] = []
else:
_snake_case : Tuple = line.split(' ' )
words.append(splits[0] )
if len(lowerCamelCase_ ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
return examples
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
_snake_case : str = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(lowerCamelCase_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_snake_case : List[str] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(lowerCamelCase_ )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : Optional[int] = f.read().splitlines()
if "O" not in labels:
_snake_case : Optional[int] = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Optional[int] ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : str = f.read().splitlines()
if "O" not in labels:
_snake_case : Union[str, Any] = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class lowercase ( a_ ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : str = mode.value
_snake_case : List[str] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Tuple = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : List[str] = []
_snake_case : str = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
return examples
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
_snake_case : Dict = 0
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : Optional[int] = preds_list[example_id]
_snake_case : List[Any] = ''
for token in sentence:
out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(lowerCamelCase_ )
example_id += 1
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 652 | 0 |
import string
def A__( __lowerCAmelCase ):
for key in range(len(string.ascii_uppercase ) ):
_snake_case : List[Any] = ''
for symbol in message:
if symbol in string.ascii_uppercase:
_snake_case : Optional[int] = string.ascii_uppercase.find(__lowerCAmelCase )
_snake_case : Tuple = num - key
if num < 0:
_snake_case : List[Any] = num + len(string.ascii_uppercase )
_snake_case : str = translated + string.ascii_uppercase[num]
else:
_snake_case : Union[str, Any] = translated + symbol
print(F'''Decryption using Key #{key}: {translated}''' )
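# Worked example (hypothetical ciphertext): for the message 'KHOOR' the loop above prints
# one candidate per key; at key 3 each letter shifts back three places (K->H, H->E,
# O->L, R->O), so that line reads "Decryption using Key #3: HELLO".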
def A__( ):
_snake_case : Union[str, Any] = input('Encrypted message: ' )
_snake_case : Optional[int] = message.upper()
decrypt(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 701 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = 1
_snake_case : str = 3
_snake_case : List[str] = (32, 32)
_snake_case : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(lowerCamelCase_ )
@property
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
def extract(*lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : str ):
class lowercase :
"""simple docstring"""
def __init__( self : Tuple ):
'''simple docstring'''
_snake_case : List[str] = torch.ones([0] )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : Tuple ):
'''simple docstring'''
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : int = self.dummy_cond_unet
_snake_case : str = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
_snake_case : Union[str, Any] = self.dummy_vae
_snake_case : Optional[Any] = self.dummy_text_encoder
_snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
_snake_case : Union[str, Any] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[str] = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_snake_case : Union[str, Any] = output.images
_snake_case : List[str] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_snake_case : Tuple = image[0, -3:, -3:, -1]
_snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : List[str] = self.dummy_cond_unet
_snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_snake_case : int = self.dummy_vae
_snake_case : List[Any] = self.dummy_text_encoder
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
_snake_case : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : Union[str, Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : str = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Tuple = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_snake_case : Optional[Any] = output.images
_snake_case : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert isinstance(pipe.scheduler , lowerCamelCase_ )
assert pipe.safety_checker is None
_snake_case : Dict = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
_snake_case : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_snake_case : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.dummy_cond_unet
_snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_snake_case : Any = self.dummy_vae
_snake_case : Optional[Any] = self.dummy_text_encoder
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
_snake_case : str = unet.half()
_snake_case : Union[str, Any] = vae.half()
_snake_case : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_snake_case : List[str] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Tuple = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Optional[int] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
_snake_case : List[str] = 40_03_66_03_46
_snake_case : int = 7
# without safety guidance (sld_guidance_scale = 0)
_snake_case : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Union[str, Any] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : str = output.images
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
_snake_case : Tuple = torch.manual_seed(lowerCamelCase_ )
_snake_case : int = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : Tuple = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : str = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Union[str, Any] = 'padme amidala taking a bath artwork, safe for work, no nudity'
_snake_case : Optional[Any] = 27_34_97_17_55
_snake_case : Union[str, Any] = 7
_snake_case : Dict = torch.manual_seed(lowerCamelCase_ )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Any = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
_snake_case : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
_snake_case : Optional[int] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[Any] = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
_snake_case : Union[str, Any] = 10_44_35_52_34
_snake_case : Dict = 12
_snake_case : Optional[int] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Optional[int] = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
_snake_case : List[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Optional[int] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 652 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowercase_ : str = logging.get_logger(__name__)
lowercase_ : List[Any] = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = "van"
def __init__( self : Any , lowerCamelCase_ : str=2_24 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Any=[7, 3, 3, 3] , lowerCamelCase_ : Tuple=[4, 2, 2, 2] , lowerCamelCase_ : Any=[64, 1_28, 3_20, 5_12] , lowerCamelCase_ : Optional[Any]=[3, 3, 12, 3] , lowerCamelCase_ : Optional[int]=[8, 8, 4, 4] , lowerCamelCase_ : Tuple="gelu" , lowerCamelCase_ : Tuple=0.02 , lowerCamelCase_ : str=1e-6 , lowerCamelCase_ : Tuple=1e-2 , lowerCamelCase_ : Union[str, Any]=0.0 , lowerCamelCase_ : List[Any]=0.0 , **lowerCamelCase_ : Tuple , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
_snake_case : List[str] = image_size
_snake_case : List[Any] = num_channels
_snake_case : int = patch_sizes
_snake_case : List[str] = strides
_snake_case : Tuple = hidden_sizes
_snake_case : Optional[int] = depths
_snake_case : str = mlp_ratios
_snake_case : Tuple = hidden_act
_snake_case : int = initializer_range
_snake_case : Optional[int] = layer_norm_eps
_snake_case : Any = layer_scale_init_value
_snake_case : List[Any] = drop_path_rate
_snake_case : Optional[Any] = dropout_rate
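# Instantiation sketch (defaults read off the signature above, mirroring the van-base
# layout): a no-argument construction uses image_size=224, hidden_sizes=[64, 128, 320, 512]
# and depths=[3, 3, 12, 3].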
| 702 |
import functools
def A__( __lowerCAmelCase , __lowerCAmelCase ):
# Validation
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(__lowerCAmelCase ) != 3 or not all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(__lowerCAmelCase ) == 0:
return 0
if min(__lowerCAmelCase ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(__lowerCAmelCase ) >= 3_66:
raise ValueError('All days elements should be less than 366' )
_snake_case : Optional[int] = set(__lowerCAmelCase )
@functools.cache
def dynamic_programming(__lowerCAmelCase ) -> int:
if index > 3_65:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
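# Worked example (hypothetical arguments, assuming the two parameters are days and costs):
# for days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15] the recursion above returns 11 --
# a 7-day pass covering days 1-7 plus 1-day tickets for days 8 and 20.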
if __name__ == "__main__":
import doctest
doctest.testmod()
| 652 | 0 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
_snake_case : Tuple = (low + high) // 2
_snake_case : Tuple = max_subarray(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_snake_case : Union[str, Any] = max_subarray(__lowerCAmelCase , mid + 1 , __lowerCAmelCase )
_snake_case : List[Any] = max_cross_sum(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
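# Worked example (classic input, assuming the routine keeps its original name
# max_subarray, as the recursive calls above suggest):
#   max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8) returns (3, 6, 6),
# the maximum-sum slice arr[3:7] = [4, -1, 2, 1] with sum 6.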
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Dict = float('-inf' ), -1
_snake_case : Optional[Any] = float('-inf' ), -1
_snake_case : int | float = 0
for i in range(__lowerCAmelCase , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
_snake_case : Any = summ
_snake_case : List[str] = i
_snake_case : Tuple = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
_snake_case : List[str] = summ
_snake_case : int = i
return max_left, max_right, (left_sum + right_sum)
def A__( __lowerCAmelCase ):
_snake_case : Tuple = [randint(1 , __lowerCAmelCase ) for _ in range(__lowerCAmelCase )]
_snake_case : str = time.time()
max_subarray(__lowerCAmelCase , 0 , input_size - 1 )
_snake_case : Union[str, Any] = time.time()
return end - start
def A__( ):
_snake_case : Dict = [10, 1_00, 10_00, 1_00_00, 5_00_00, 10_00_00, 20_00_00, 30_00_00, 40_00_00, 50_00_00]
_snake_case : Tuple = [time_max_subarray(__lowerCAmelCase ) for input_size in input_sizes]
print('No of Inputs\t\tTime Taken' )
for input_size, runtime in zip(__lowerCAmelCase , __lowerCAmelCase ):
print(__lowerCAmelCase , '\t\t' , __lowerCAmelCase )
plt.plot(__lowerCAmelCase , __lowerCAmelCase )
plt.xlabel('Number of Inputs' )
plt.ylabel('Time taken in seconds' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 703 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase_ : str = logging.get_logger(__name__)
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : int , *lowerCamelCase_ : str , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
warnings.warn(
'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use SegformerImageProcessor instead.' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
| 652 | 0 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
lowercase_ : str = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def A__( __lowerCAmelCase ):
_snake_case : Union[str, Any] = test_results.split(' ' )
_snake_case : List[Any] = 0
_snake_case : str = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_snake_case : str = expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(__lowerCAmelCase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
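# Example (synthetic pytest summary): for test_results = "2 failed, 10 passed in 0:02:13"
# the scan above yields failed=2, success=10 and time_spent="0:02:13" (the last token,
# since no "=" banner surrounds the output).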
def A__( __lowerCAmelCase ):
_snake_case : Optional[int] = {}
_snake_case : Optional[int] = None
_snake_case : Dict = False
for line in failures_short_lines.split('\n' ):
if re.search(R'_ \[doctest\]' , __lowerCAmelCase ):
_snake_case : List[Any] = True
_snake_case : List[Any] = line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
_snake_case : Any = line
_snake_case : Tuple = False
return failures
class lowercase :
"""simple docstring"""
def __init__( self : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Dict ):
'''simple docstring'''
_snake_case : int = title
_snake_case : Dict = doc_test_results['time_spent'].split(',' )[0]
_snake_case : Optional[int] = doc_test_results['success']
_snake_case : str = doc_test_results['failures']
_snake_case : Any = self.n_success + self.n_failures
# Failures and success of the modeling tests
_snake_case : List[str] = doc_test_results
@property
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
_snake_case : Tuple = [self._time_spent]
_snake_case : int = 0
for time in time_spent:
_snake_case : Optional[int] = time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(lowerCamelCase_ ) == 1:
_snake_case : Any = [0, 0, time_parts[0]]
_snake_case : Any = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
_snake_case : Union[str, Any] = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f'''{int(lowerCamelCase_ )}h{int(lowerCamelCase_ )}m{int(lowerCamelCase_ )}s'''
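    # Example (illustrative): a time_spent entry of "1:02:03" accumulates 3723 seconds,
    # which the arithmetic above renders as "1h2m3s".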
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
f''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Tuple = 40
_snake_case : Any = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(lowerCamelCase_ , lowerCamelCase_ )}
_snake_case : Tuple = ''
for category, failures in category_failures.items():
if len(lowerCamelCase_ ) == 0:
continue
if report != "":
report += "\n\n"
report += f'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(lowerCamelCase_ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : int = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(lowerCamelCase_ )
@staticmethod
def __UpperCAmelCase ( ):
'''simple docstring'''
_snake_case : Dict = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(lowerCamelCase_ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=lowerCamelCase_ , )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
_snake_case : Union[str, Any] = f'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else 'All tests passed.'
_snake_case : List[str] = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=lowerCamelCase_ , )
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : str = ''
for key, value in failures.items():
_snake_case : List[Any] = value[:2_00] + ' [Truncated]' if len(lowerCamelCase_ ) > 2_50 else value
failures_text += f'''*{key}*\n_{value}_\n\n'''
_snake_case : Any = job_name
_snake_case : Optional[int] = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
_snake_case : Union[str, Any] = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
_snake_case : Any = self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
_snake_case : str = sorted(self.doc_test_results.items() , key=lambda lowerCamelCase_ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
_snake_case : Optional[int] = f'''*Num failures* :{len(job_result['failed'] )} \n'''
_snake_case : List[Any] = job_result['failures']
_snake_case : Optional[Any] = self.get_reply_blocks(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , text=lowerCamelCase_ )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=f'''Results for {job}''' , blocks=lowerCamelCase_ , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def A__( ):
_snake_case : Dict = os.environ['GITHUB_RUN_ID']
_snake_case : str = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
_snake_case : Any = requests.get(__lowerCAmelCase ).json()
_snake_case : Optional[int] = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
_snake_case : Any = math.ceil((result['total_count'] - 1_00) / 1_00 )
for i in range(__lowerCAmelCase ):
_snake_case : Union[str, Any] = requests.get(url + F'''&page={i + 2}''' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , __lowerCAmelCase )
return {}
def A__( __lowerCAmelCase ):
_snake_case : str = {}
if os.path.exists(__lowerCAmelCase ):
_snake_case : Any = os.listdir(__lowerCAmelCase )
for file in files:
try:
with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , encoding='utf-8' ) as f:
_snake_case : List[Any] = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'''Could not open {os.path.join(__lowerCAmelCase , __lowerCAmelCase )}.''' ) from e
return _artifact
def A__( ):
class lowercase :
"""simple docstring"""
def __init__( self : str , lowerCamelCase_ : str ):
'''simple docstring'''
_snake_case : str = name
_snake_case : int = []
def __str__( self : Any ):
'''simple docstring'''
return self.name
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : str ):
'''simple docstring'''
self.paths.append({'name': self.name, 'path': path} )
_snake_case : Dict[str, Artifact] = {}
_snake_case : Dict = filter(os.path.isdir , os.listdir() )
for directory in directories:
_snake_case : int = directory
if artifact_name not in _available_artifacts:
_snake_case : Optional[Any] = Artifact(__lowerCAmelCase )
_available_artifacts[artifact_name].add_path(__lowerCAmelCase )
return _available_artifacts
if __name__ == "__main__":
lowercase_ : Optional[int] = get_job_links()
lowercase_ : int = retrieve_available_artifacts()
lowercase_ : List[str] = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
lowercase_ : List[str] = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
lowercase_ : Dict = github_actions_job_links.get('''run_doctests''')
lowercase_ : Dict = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
lowercase_ : List[Any] = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
lowercase_ : Dict = handle_test_results(artifact['''stats'''])
lowercase_ : str = failed
lowercase_ : Dict = success
lowercase_ : Optional[Any] = time_spent[1:-1] + ''', '''
lowercase_ : int = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
lowercase_ : Dict = line.replace('''FAILED ''', '''''')
lowercase_ : Tuple = line.split()[0].replace('''\n''', '''''')
if "::" in line:
            lowercase_ , lowercase_ : Dict = line.split('''::''')
else:
            lowercase_ , lowercase_ : int = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
lowercase_ : Optional[Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
lowercase_ : int = all_failures[test] if test in all_failures else '''N/A'''
lowercase_ : Tuple = failure
break
lowercase_ : int = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
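# --- Editor's illustrative sketch (standalone restatement, not part of the original script) ---
# A minimal version of the duration-aggregation logic in the time property above:
# each entry is either "hh:mm:ss" or a bare (possibly fractional) second count, and
# everything is folded into total seconds before being re-rendered as "XhYmZs".
def aggregate_time_spent(entries):
    total_secs = 0
    for entry in entries:
        parts = entry.split(":")
        if len(parts) == 1:  # e.g. ".57" or "3.21" -> seconds only
            parts = ["0", "0", parts[0]]
        hours, minutes, seconds = int(parts[0]), int(parts[1]), float(parts[2])
        total_secs += hours * 3600 + minutes * 60 + seconds
    hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
    return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

assert aggregate_time_spent(["0:01:30", "0:00:45", ".5"]) == "0h2m15s"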
| 704 |
from math import factorial
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if successes > trials:
raise ValueError('successes must be lower or equal to trials' )
if trials < 0 or successes < 0:
raise ValueError('the function is defined for non-negative integers' )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('the function is defined for non-negative integers' )
if not 0 < prob < 1:
raise ValueError('prob has to be in range of 1 - 0' )
_snake_case : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
_snake_case : List[Any] = float(factorial(__lowerCAmelCase ) )
coefficient /= factorial(__lowerCAmelCase ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
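# --- Editor's illustrative check (standalone re-derivation of the same formula) ---
# P(k successes in n trials) = C(n, k) * p^k * (1 - p)^(n - k); the probabilities
# over k = 0..n must sum to 1, and k=2, n=4, p=0.75 matches the printed example.
from math import comb

def binomial_pmf(k, n, p):
    return comb(n, k) * p**k * (1 - p) ** (n - k)

assert abs(sum(binomial_pmf(k, 4, 0.75) for k in range(5)) - 1.0) < 1e-12
assert abs(binomial_pmf(2, 4, 0.75) - 0.2109375) < 1e-12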
| 652 | 0 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowercase :
"""simple docstring"""
@staticmethod
def __UpperCAmelCase ( *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Dict ):
'''simple docstring'''
pass
@is_pipeline_test
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : Any = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : str ):
'''simple docstring'''
_snake_case : List[str] = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
_snake_case : Optional[int] = [
{
'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'question': 'How many cats are there?',
},
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'question': 'How many cats are there?',
},
]
return vqa_pipeline, examples
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : Union[str, Any] = vqa_pipeline(lowerCamelCase_ , top_k=1 )
self.assertEqual(
lowerCamelCase_ , [
[{'score': ANY(lowerCamelCase_ ), 'answer': ANY(lowerCamelCase_ )}],
[{'score': ANY(lowerCamelCase_ ), 'answer': ANY(lowerCamelCase_ )}],
] , )
@require_torch
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Dict = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
_snake_case : List[Any] = './tests/fixtures/tests_samples/COCO/000000039769.png'
_snake_case : Any = 'How many cats are there?'
_snake_case : Any = vqa_pipeline(image=lowerCamelCase_ , question='How many cats are there?' , top_k=2 )
self.assertEqual(
lowerCamelCase_ , [{'score': ANY(lowerCamelCase_ ), 'answer': ANY(lowerCamelCase_ )}, {'score': ANY(lowerCamelCase_ ), 'answer': ANY(lowerCamelCase_ )}] )
_snake_case : Tuple = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
lowerCamelCase_ , [{'score': ANY(lowerCamelCase_ ), 'answer': ANY(lowerCamelCase_ )}, {'score': ANY(lowerCamelCase_ ), 'answer': ANY(lowerCamelCase_ )}] )
@slow
@require_torch
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : List[Any] = pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' )
_snake_case : Tuple = './tests/fixtures/tests_samples/COCO/000000039769.png'
_snake_case : int = 'How many cats are there?'
_snake_case : List[str] = vqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
_snake_case : Union[str, Any] = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
_snake_case : Any = vqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [[{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2 , )
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
pass
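# --- Editor's illustrative sketch (hypothetical direct usage, mirroring the tests above) ---
# The tiny checkpoint name and the COCO fixture path are taken from the tests and
# assumed to be available in the environment.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
answers = vqa(
    image="./tests/fixtures/tests_samples/COCO/000000039769.png",
    question="How many cats are there?",
    top_k=2,
)
print(answers)  # a list of {"score": float, "answer": str} dicts, highest score first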
| 705 |
lowercase_ : Tuple = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest released version, comment out the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ : str = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
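# --- Editor's illustrative sketch (assumed use of the mapping above) ---
# The placeholder-to-class mapping is presumably applied with plain string
# substitution when rendering documentation templates; a minimal version:
def render_template(template: str, replacements: dict) -> str:
    for placeholder, value in replacements.items():
        template = template.replace(placeholder, value)
    return template

demo = "Load {model_class} together with {processor_class}."
print(render_template(demo, {"{model_class}": "FakeModelClass", "{processor_class}": "FakeProcessorClass"}))
# -> "Load FakeModelClass together with FakeProcessorClass."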
| 652 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowercase :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple=13 , lowerCamelCase_ : int=30 , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Optional[Any]=37 , lowerCamelCase_ : str="gelu" , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : List[str]=10 , lowerCamelCase_ : List[str]=0.02 , lowerCamelCase_ : Optional[Any]=3 , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : str=2 , ):
'''simple docstring'''
_snake_case : str = parent
_snake_case : List[Any] = batch_size
_snake_case : Dict = image_size
_snake_case : Any = patch_size
_snake_case : Any = num_channels
_snake_case : str = is_training
_snake_case : Any = use_labels
_snake_case : Union[str, Any] = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : Any = num_attention_heads
_snake_case : Any = intermediate_size
_snake_case : str = hidden_act
_snake_case : Tuple = hidden_dropout_prob
_snake_case : List[str] = attention_probs_dropout_prob
_snake_case : Union[str, Any] = type_sequence_label_size
_snake_case : List[Any] = initializer_range
_snake_case : Optional[int] = scope
_snake_case : List[Any] = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_snake_case : Tuple = (image_size // patch_size) ** 2
_snake_case : Optional[int] = num_patches + 2
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Any = None
if self.use_labels:
_snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : List[str] = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[str] ):
'''simple docstring'''
_snake_case : Dict = TFDeiTModel(config=lowerCamelCase_ )
_snake_case : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any ):
'''simple docstring'''
_snake_case : Tuple = TFDeiTForMaskedImageModeling(config=lowerCamelCase_ )
_snake_case : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_snake_case : Dict = 1
_snake_case : List[Any] = TFDeiTForMaskedImageModeling(lowerCamelCase_ )
_snake_case : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __UpperCAmelCase ( self : str , lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : List[Any] = self.type_sequence_label_size
_snake_case : Dict = TFDeiTForImageClassification(lowerCamelCase_ )
_snake_case : Tuple = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case : Optional[int] = 1
_snake_case : List[Any] = TFDeiTForImageClassification(lowerCamelCase_ )
_snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case : List[str] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : str = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case : Union[str, Any] = config_and_inputs
_snake_case : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowercase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : Optional[Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
_UpperCamelCase : int = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Optional[int] = False
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : Any = TFDeiTModelTester(self )
_snake_case : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_snake_case : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , tf.keras.layers.Dense ) )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Optional[int] = model_class(lowerCamelCase_ )
_snake_case : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : str = [*signature.parameters.keys()]
_snake_case : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=False ):
'''simple docstring'''
_snake_case : Union[str, Any] = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Union[str, Any] = TFDeiTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__( ):
_snake_case : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Any = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
_snake_case : Any = self.default_image_processor
_snake_case : List[Any] = prepare_img()
_snake_case : Dict = image_processor(images=lowerCamelCase_ , return_tensors='tf' )
# forward pass
_snake_case : Any = model(**lowerCamelCase_ )
# verify the logits
_snake_case : Tuple = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
_snake_case : Optional[int] = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) )
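# --- Editor's illustrative check (restates the DeiT geometry used by the tester above) ---
# DeiT's sequence length is the number of image patches plus two special tokens
# ([CLS] and distillation), as noted in the model tester's comment.
def deit_seq_length(image_size: int, patch_size: int) -> int:
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 2

assert deit_seq_length(30, 2) == 227    # tester defaults: 15 * 15 patches + 2
assert deit_seq_length(224, 16) == 198  # the real 224px model: 14 * 14 patches + 2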
| 706 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ : Optional[Any] = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Tuple = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowercase_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 0 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowercase_ : Any = logging.getLogger(__name__)
def A__( __lowerCAmelCase , __lowerCAmelCase ):
# save results
if os.path.exists(__lowerCAmelCase ):
if os.path.exists(os.path.join(__lowerCAmelCase , 'config.json' ) ) and os.path.isfile(
os.path.join(__lowerCAmelCase , 'config.json' ) ):
os.remove(os.path.join(__lowerCAmelCase , 'config.json' ) )
if os.path.exists(os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) ):
os.remove(os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) )
else:
os.makedirs(__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
def A__( __lowerCAmelCase , __lowerCAmelCase=False ):
_snake_case : int = 2
if unlogit:
_snake_case : Dict = torch.pow(__lowerCAmelCase , __lowerCAmelCase )
_snake_case : Optional[int] = p * torch.log(__lowerCAmelCase )
_snake_case : List[str] = 0
return -plogp.sum(dim=-1 )
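# --- Editor's illustrative check of the entropy helper above (standalone; wrapped in a
# function so nothing runs at import time). For a uniform distribution over n
# outcomes the entropy is log(n).
def _entropy_demo():
    import math

    import torch

    uniform = torch.full((4,), 0.25)
    ent = -(uniform * torch.log(uniform)).sum(dim=-1)
    assert torch.isclose(ent, torch.tensor(math.log(4)))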
def A__( __lowerCAmelCase ):
logger.info('lv, h >\t' + '\t'.join(F'''{x + 1}''' for x in range(len(__lowerCAmelCase ) ) ) )
for row in range(len(__lowerCAmelCase ) ):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=False ):
    _snake_case , _snake_case : List[str] = model.config.num_hidden_layers, model.config.num_attention_heads
_snake_case : Optional[int] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
_snake_case : Union[str, Any] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
if head_mask is None:
_snake_case : List[str] = torch.ones(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=__lowerCAmelCase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
_snake_case : List[str] = None
_snake_case : str = 0.0
_snake_case : List[str] = 0.0
for step, inputs in enumerate(tqdm(__lowerCAmelCase , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
_snake_case : Dict = tuple(t.to(args.device ) for t in inputs )
        (_snake_case , ) : int = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_snake_case : List[str] = model(__lowerCAmelCase , labels=__lowerCAmelCase , head_mask=__lowerCAmelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
        _snake_case , _snake_case , _snake_case : Dict = (
            outputs[0],
            outputs[1],
            outputs[-1],
        ) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__lowerCAmelCase ):
_snake_case : int = entropy(attn.detach() , __lowerCAmelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__lowerCAmelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_snake_case : List[str] = 2
_snake_case : Any = torch.pow(torch.pow(__lowerCAmelCase , __lowerCAmelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
_snake_case : Union[str, Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(__lowerCAmelCase )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(__lowerCAmelCase )
logger.info('Head ranked by importance scores' )
_snake_case : Tuple = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_snake_case : Optional[Any] = torch.arange(
head_importance.numel() , device=args.device )
_snake_case : Union[str, Any] = head_ranks.view_as(__lowerCAmelCase )
print_ad_tensor(__lowerCAmelCase )
return attn_entropy, head_importance, total_loss
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
    _snake_case , _snake_case , _snake_case : int = compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase )
_snake_case : List[str] = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , __lowerCAmelCase , original_score * args.masking_threshold )
_snake_case : Optional[Any] = torch.ones_like(__lowerCAmelCase )
_snake_case : Tuple = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_snake_case : Dict = original_score
while current_score >= original_score * args.masking_threshold:
_snake_case : List[str] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_snake_case : Optional[Any] = float('Inf' )
_snake_case : Optional[int] = head_importance.view(-1 ).sort()[1]
if len(__lowerCAmelCase ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
_snake_case : List[Any] = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
_snake_case : Tuple = new_head_mask.view(-1 )
_snake_case : List[str] = 0.0
_snake_case : int = new_head_mask.view_as(__lowerCAmelCase )
_snake_case : Optional[Any] = new_head_mask.clone().detach()
print_ad_tensor(__lowerCAmelCase )
# Compute metric and head importance again
        _snake_case , _snake_case , _snake_case : List[str] = compute_heads_importance(
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_snake_case : Union[str, Any] = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , __lowerCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('Final head mask' )
print_ad_tensor(__lowerCAmelCase )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[Any] = datetime.now()
    _snake_case , _snake_case , _snake_case : List[Any] = compute_heads_importance(
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_snake_case : str = 1 / loss
_snake_case : Optional[int] = datetime.now() - before_time
_snake_case : Optional[int] = sum(p.numel() for p in model.parameters() )
_snake_case : Optional[Any] = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowerCAmelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[int] = [
v,
]
assert sum(len(__lowerCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__lowerCAmelCase )
_snake_case : Optional[Any] = sum(p.numel() for p in model.parameters() )
_snake_case : List[str] = datetime.now()
    _snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance(
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase , actually_pruned=__lowerCAmelCase , )
_snake_case : Dict = 1 / loss
_snake_case : str = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , __lowerCAmelCase , __lowerCAmelCase , pruned_num_params / original_num_params * 1_00 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , __lowerCAmelCase , __lowerCAmelCase )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_00 )
save_model(__lowerCAmelCase , args.output_dir )
def A__( ):
_snake_case : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=__lowerCAmelCase , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=__lowerCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=__lowerCAmelCase , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=__lowerCAmelCase , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=__lowerCAmelCase , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=__lowerCAmelCase , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_28 , type=__lowerCAmelCase , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=__lowerCAmelCase , help='Batch size.' )
parser.add_argument('--seed' , type=__lowerCAmelCase , default=42 )
parser.add_argument('--local_rank' , type=__lowerCAmelCase , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
_snake_case : List[str] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCAmelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_snake_case : Optional[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
_snake_case : Any = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_snake_case : Union[str, Any] = torch.device('cuda' , args.local_rank )
_snake_case : Dict = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_snake_case : Any = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_snake_case : List[str] = nn.parallel.DistributedDataParallel(
__lowerCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCAmelCase )
elif args.n_gpu > 1:
_snake_case : int = nn.DataParallel(__lowerCAmelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__lowerCAmelCase )
torch.save(__lowerCAmelCase , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , __lowerCAmelCase )
# Prepare dataset
_snake_case : str = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_snake_case : Dict = (torch.from_numpy(__lowerCAmelCase ),)
_snake_case : List[Any] = TensorDataset(*__lowerCAmelCase )
_snake_case : List[str] = RandomSampler(__lowerCAmelCase )
_snake_case : List[str] = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_snake_case : Union[str, Any] = mask_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
prune_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
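# --- Editor's note (hypothetical invocation; the script name and paths are placeholders) ---
#
#   python run_prune_gpt2.py \
#       --model_name_or_path gpt2 \
#       --data_dir ./data/tokens.txt \
#       --output_dir ./pruned_gpt2 \
#       --try_masking --masking_threshold 0.9 --masking_amount 0.1
#
# A standalone sketch of the importance normalization used above: an L2 norm per
# layer, followed by a global min-max rescaling into [0, 1].
import torch

def normalize_importance(head_importance: torch.Tensor) -> torch.Tensor:
    norm_by_layer = torch.pow(torch.pow(head_importance, 2).sum(-1), 1 / 2)
    head_importance = head_importance / (norm_by_layer.unsqueeze(-1) + 1e-20)
    return (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())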
| 707 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase_ : Optional[int] = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
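# --- Editor's illustrative sketch (simplified stand-in, not the real _LazyModule) ---
# The import structure above defers heavy imports (torch, model code) until an
# attribute is first accessed; inside a package's __init__.py, PEP 562's
# module-level __getattr__ gives the same behaviour in miniature:
import importlib

_LAZY_EXPORTS = {"MegaModel": "modeling_mega", "MegaConfig": "configuration_mega"}

def __getattr__(name):
    if name in _LAZY_EXPORTS:
        submodule = importlib.import_module("." + _LAZY_EXPORTS[name], __package__)
        return getattr(submodule, name)  # imported only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")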
| 652 | 0 |
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Union[str, Any] = len(__lowerCAmelCase )
_snake_case : Union[str, Any] = len(__lowerCAmelCase )
_snake_case : Optional[Any] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
_snake_case : Optional[int] = True
for i in range(__lowerCAmelCase ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
_snake_case : List[str] = True
if a[i].islower():
_snake_case : Optional[int] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
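# --- Editor's illustrative usage (standalone copy of the DP above with telling names) ---
# dp[i][j] records whether the first i characters of `a` can be turned into the
# first j characters of `b` by upper-casing some lowercase letters of `a` and
# deleting the remaining lowercase ones.
def can_abbreviate(a: str, b: str) -> bool:
    n, m = len(a), len(b)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True  # consume a[i] as the (upper-cased) b[j]
                if a[i].islower():
                    dp[i + 1][j] = True      # delete the lowercase a[i]
    return dp[n][m]

assert can_abbreviate("daBcd", "ABC") is True
assert can_abbreviate("AbcDE", "AFDE") is False  # 'b' and 'c' can never become 'F'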
| 708 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowercase_ : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def A__( __lowerCAmelCase , __lowerCAmelCase ):
inspect_dataset(__lowerCAmelCase , __lowerCAmelCase )
_snake_case : str = path + '.py'
assert script_name in os.listdir(__lowerCAmelCase )
assert "__pycache__" not in os.listdir(__lowerCAmelCase )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def A__( __lowerCAmelCase , __lowerCAmelCase ):
inspect_metric(__lowerCAmelCase , __lowerCAmelCase )
_snake_case : Tuple = path + '.py'
assert script_name in os.listdir(__lowerCAmelCase )
assert "__pycache__" not in os.listdir(__lowerCAmelCase )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Union[str, Any] = get_dataset_config_info(__lowerCAmelCase , config_name=__lowerCAmelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
with pytest.raises(__lowerCAmelCase ):
get_dataset_config_info(__lowerCAmelCase , config_name=__lowerCAmelCase )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[Any] = get_dataset_config_names(__lowerCAmelCase )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[Any] = get_dataset_infos(__lowerCAmelCase )
assert list(infos.keys() ) == expected_configs
_snake_case : Any = expected_configs[0]
assert expected_config in infos
_snake_case : str = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Any = get_dataset_infos(__lowerCAmelCase )
assert expected_config in infos
_snake_case : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
with pytest.raises(__lowerCAmelCase ):
get_dataset_split_names(__lowerCAmelCase , config_name=__lowerCAmelCase )
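# --- Editor's illustrative usage (mirrors the parametrized expectations above) ---
# Direct calls of the inspected helpers; network access and the public "squad"
# dataset are assumed to be available.
from datasets import get_dataset_config_names, get_dataset_split_names

configs = get_dataset_config_names("squad")
print(configs)  # expected: ['plain_text']
print(get_dataset_split_names("squad", configs[0]))  # expected: ['train', 'validation']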
| 652 | 0 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. This contrasts with taking a full-size model, reducing its layers and
# emb dimensions to the minimum, but keeping the full vocab + merges files, which leads to ~3MB in total.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
lowercase_ : Union[str, Any] = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
lowercase_ : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
lowercase_ : Any = dict(zip(vocab, range(len(vocab))))
lowercase_ : List[str] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ : Tuple = Path(tmpdirname)
lowercase_ : List[Any] = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
lowercase_ : Union[str, Any] = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
lowercase_ : List[Any] = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
lowercase_ : List[str] = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
lowercase_ : Tuple = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
lowercase_ : List[str] = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
lowercase_ : Dict = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
lowercase_ : int = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
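# --- Editor's illustrative check (assumes the tiny checkpoint written above) ---
# Reloading the saved model and tokenizer verifies the artifacts are
# self-consistent before uploading.
reloaded_tokenizer = FSMTTokenizer.from_pretrained(mname_tiny)
reloaded_model = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
reloaded_batch = reloaded_tokenizer(["Round trip"], return_tensors="pt")
print(reloaded_model(**reloaded_batch).logits.shape)  # (1, seq_len, vocab_size)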
| 709 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
# Initialise PyTorch model
_snake_case : Optional[int] = BertConfig.from_json_file(__lowerCAmelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
_snake_case : List[str] = BertForPreTraining(__lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_bert(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __lowerCAmelCase )
if __name__ == "__main__":
lowercase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase_ : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
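# --- Editor's note (hypothetical invocation; the script name and paths are placeholders) ---
# Typical command line for this converter, given a Google-format BERT checkpoint:
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./uncased_L-12_H-768_A-12/bert_model.ckpt \
#       --bert_config_file ./uncased_L-12_H-768_A-12/bert_config.json \
#       --pytorch_dump_path ./bert_pytorch_model.bin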
| 652 | 0 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 710 |
import itertools
import math
def A__( __lowerCAmelCase ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A__( ):
_snake_case : Optional[Any] = 2
while True:
if is_prime(__lowerCAmelCase ):
yield num
num += 1
def A__( __lowerCAmelCase = 1_00_01 ):
return next(itertools.islice(prime_generator() , nth - 1 , __lowerCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
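# --- Editor's illustrative check (standalone restatement of the 6k +/- 1 test) ---
# Every prime above 3 has the form 6k +/- 1, so trial division only needs 2, 3,
# and the pairs (i, i + 2) for i = 5, 11, 17, ... up to sqrt(n).
import math

def is_prime_6k(number: int) -> bool:
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

assert [n for n in range(2, 30) if is_prime_6k(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]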
| 652 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowercase_ : Tuple = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : int = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Union[str, Any] = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 711 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
lowercase_ : Dict = '''bert-base-cased'''
lowercase_ : Any = '''google/pegasus-xsum'''
lowercase_ : str = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
lowercase_ : Tuple = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
lowercase_ : Any = '''patrickvonplaten/t5-tiny-random'''
lowercase_ : List[Any] = '''sshleifer/bart-tiny-random'''
lowercase_ : Dict = '''sshleifer/tiny-mbart'''
lowercase_ : str = '''sshleifer/tiny-marian-en-de'''
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : str = '\n'.join(__lowerCAmelCase )
Path(__lowerCAmelCase ).open('w' ).writelines(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(__lowerCAmelCase , F'''{split}.source''' ) , __lowerCAmelCase )
_dump_articles(os.path.join(__lowerCAmelCase , F'''{split}.target''' ) , __lowerCAmelCase )
return tmp_dir
class lowercase ( a_ ):
"""simple docstring"""
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
_snake_case : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_snake_case : Optional[int] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
_snake_case : Any = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
_snake_case : Dict = 4
_snake_case : Any = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_snake_case , _snake_case : Optional[Any] = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
_snake_case : int = SeqaSeqDataset(
lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , src_lang=lowerCamelCase_ , tgt_lang=lowerCamelCase_ , )
_snake_case : List[str] = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_snake_case : List[Any] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str ):
'''simple docstring'''
_snake_case : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
_snake_case : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_snake_case : Dict = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
_snake_case : Union[str, Any] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
_snake_case : Union[str, Any] = 4
_snake_case : Optional[int] = LegacySeqaSeqDataset(
lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=20 , max_target_length=lowerCamelCase_ , )
_snake_case : Dict = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : int = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
_snake_case : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_snake_case : Any = tmp_dir.joinpath('train.source' ).open().readlines()
_snake_case : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(lowerCamelCase_ , lowerCamelCase_ , 1_28 , lowerCamelCase_ )
_snake_case : Tuple = {x.name for x in tmp_dir.iterdir()}
_snake_case : Dict = {x.name for x in save_dir.iterdir()}
_snake_case : str = save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(lowerCamelCase_ ) < len(lowerCamelCase_ )
assert len(lowerCamelCase_ ) == 1
assert len(packed_examples[0] ) == sum(len(lowerCamelCase_ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
_snake_case , _snake_case , _snake_case : int = self._get_dataset(max_len=64 )
_snake_case : List[str] = 64
_snake_case : str = ds.make_dynamic_sampler(lowerCamelCase_ , required_batch_size_multiple=lowerCamelCase_ )
_snake_case : Optional[Any] = [len(lowerCamelCase_ ) for x in batch_sampler]
assert len(set(lowerCamelCase_ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(lowerCamelCase_ ) == len(lowerCamelCase_ ) # no dropped or added examples
_snake_case : Union[str, Any] = DataLoader(lowerCamelCase_ , batch_sampler=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
_snake_case : List[Any] = []
_snake_case : List[Any] = []
for batch in data_loader:
_snake_case : Any = batch['input_ids'].shape
_snake_case : str = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            _snake_case : int = np.prod(batch['input_ids'].shape )
num_src_per_batch.append(lowerCamelCase_ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(lowerCamelCase_ )
assert num_src_per_batch[0] == max(lowerCamelCase_ )
if failures:
raise AssertionError(f'''too many tokens in {len(lowerCamelCase_ )} batches''' )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case : str = self._get_dataset(max_len=5_12 )
_snake_case : Optional[Any] = 2
_snake_case : Dict = ds.make_sortish_sampler(lowerCamelCase_ , shuffle=lowerCamelCase_ )
_snake_case : int = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
_snake_case : str = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowerCamelCase_ )
_snake_case : Tuple = tokenizer.pad_token_id
def count_pad_tokens(lowerCamelCase_ : List[str] , lowerCamelCase_ : Any="input_ids" ):
return [batch[k].eq(lowerCamelCase_ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) ) < sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) )
assert sum(count_pad_tokens(lowerCamelCase_ ) ) < sum(count_pad_tokens(lowerCamelCase_ ) )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Tuple=10_00 , lowerCamelCase_ : Tuple=1_28 ):
'''simple docstring'''
if os.getenv('USE_REAL_DATA' , lowerCamelCase_ ):
_snake_case : Dict = 'examples/seq2seq/wmt_en_ro'
_snake_case : List[Any] = max_len * 2 * 64
if not Path(lowerCamelCase_ ).joinpath('train.len' ).exists():
save_len_file(lowerCamelCase_ , lowerCamelCase_ )
else:
_snake_case : Union[str, Any] = 'examples/seq2seq/test_data/wmt_en_ro'
_snake_case : List[Any] = max_len * 4
save_len_file(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
_snake_case : str = SeqaSeqDataset(
lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , n_obs=lowerCamelCase_ , )
return ds, max_tokens, tokenizer
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case : Any = self._get_dataset()
_snake_case : List[str] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=lowerCamelCase_ ) )
_snake_case : Optional[Any] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=lowerCamelCase_ ) )
assert idsa.intersection(lowerCamelCase_ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase_ , use_fast=lowerCamelCase_ )
if tok_name == MBART_TINY:
_snake_case : int = SeqaSeqDataset(
lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
_snake_case : Optional[Any] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_snake_case : Tuple = SeqaSeqDataset(
lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
_snake_case : List[Any] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(lowerCamelCase_ ) == 1 if tok_name == BART_TINY else len(lowerCamelCase_ ) == 0
| 652 | 0 |
def A__( __lowerCAmelCase , __lowerCAmelCase ):
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be positive integers' )
_snake_case : Optional[int] = str(bin(__lowerCAmelCase ) )
binary_number += "0" * shift_amount
return binary_number
def A__( __lowerCAmelCase , __lowerCAmelCase ):
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be positive integers' )
_snake_case : Dict = str(bin(__lowerCAmelCase ) )[2:]
if shift_amount >= len(__lowerCAmelCase ):
return "0b0"
_snake_case : str = binary_number[: len(__lowerCAmelCase ) - shift_amount]
return "0b" + shifted_binary_number
def A__( __lowerCAmelCase , __lowerCAmelCase ):
if number >= 0: # Get binary representation of positive number
_snake_case : int = '0' + str(bin(__lowerCAmelCase ) ).strip('-' )[2:]
else: # Get binary (2's complement) representation of negative number
_snake_case : str = len(bin(__lowerCAmelCase )[3:] ) # Find 2's complement of number
_snake_case : Optional[Any] = bin(abs(__lowerCAmelCase ) - (1 << binary_number_length) )[3:]
_snake_case : int = (
'1' + '0' * (binary_number_length - len(__lowerCAmelCase )) + binary_number
)
if shift_amount >= len(__lowerCAmelCase ):
return "0b" + binary_number[0] * len(__lowerCAmelCase )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(__lowerCAmelCase ) - shift_amount]
)
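# Worked examples of the intended behaviour above (values illustrative): a
# logical left shift of 10 by 2 yields '0b101000', a logical right shift of 10
# by 1 yields '0b101', and an arithmetic right shift of -10 by 1 yields
# '0b11011' (the 5-bit two's-complement pattern for -5).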
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
from __future__ import annotations
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Dict = list(range(len(__lowerCAmelCase ) ) )
_snake_case : Optional[int] = [v / w for v, w in zip(__lowerCAmelCase , __lowerCAmelCase )]
index.sort(key=lambda __lowerCAmelCase : ratio[i] , reverse=__lowerCAmelCase )
_snake_case : float = 0
_snake_case : list[float] = [0] * len(__lowerCAmelCase )
for i in index:
if weight[i] <= capacity:
_snake_case : List[Any] = 1
max_value += value[i]
capacity -= weight[i]
else:
_snake_case : Any = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
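# Illustrative run (assumed inputs): value=[60, 100, 120], weight=[10, 20, 30],
# capacity=50. Sorting indices by value/weight ratio (6 > 5 > 4) takes items 0
# and 1 whole, then 20/30 of item 2, so the intended max_value is 240.0.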
if __name__ == "__main__":
import doctest
doctest.testmod()
| 652 | 0 |
import argparse
import os
import re
import packaging.version
lowercase_ : Optional[int] = '''examples/'''
lowercase_ : List[Any] = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
lowercase_ : int = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
lowercase_ : int = '''README.md'''
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
with open(__lowerCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
_snake_case : Optional[int] = f.read()
_snake_case : Dict = REPLACE_PATTERNS[pattern]
_snake_case : Tuple = replace.replace('VERSION' , __lowerCAmelCase )
_snake_case : str = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(__lowerCAmelCase )
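# For example (hypothetical call): updating 'setup.py' to version '0.19.0'
# with pattern='setup' rewrites a line like `    version="0.18.0.dev0",` to
# `    version="0.19.0",` via the regex/template pair registered above.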
def A__( __lowerCAmelCase ):
for folder, directories, fnames in os.walk(__lowerCAmelCase ):
        # Skip folders whose examples are not actively maintained
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern='examples' )
def A__( __lowerCAmelCase , __lowerCAmelCase=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not patch:
update_version_in_examples(__lowerCAmelCase )
def A__( ):
_snake_case : List[Any] = '🤗 Transformers currently provides the following architectures'
_snake_case : List[Any] = '1. Want to contribute a new model?'
with open(__lowerCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
_snake_case : str = f.readlines()
# Find the start of the list.
_snake_case : int = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_snake_case : Tuple = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
_snake_case : Tuple = lines[index].replace(
'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
index += 1
with open(__lowerCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(__lowerCAmelCase )
def A__( ):
with open(REPLACE_FILES['init'] , 'r' ) as f:
_snake_case : Optional[int] = f.read()
_snake_case : Tuple = REPLACE_PATTERNS['init'][0].search(__lowerCAmelCase ).groups()[0]
return packaging.version.parse(__lowerCAmelCase )
def A__( __lowerCAmelCase=False ):
_snake_case : Optional[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
_snake_case : int = default_version.base_version
elif patch:
_snake_case : Optional[Any] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
_snake_case : Any = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
_snake_case : List[Any] = input(F'''Which version are you releasing? [{default_version}]''' )
if len(__lowerCAmelCase ) == 0:
_snake_case : Tuple = default_version
print(F'''Updating version to {version}.''' )
global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase )
def A__( ):
_snake_case : Tuple = get_version()
_snake_case : List[Any] = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
_snake_case : List[Any] = current_version.base_version
# Check with the user we got that right.
_snake_case : Union[str, Any] = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(__lowerCAmelCase ) == 0:
_snake_case : Dict = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(__lowerCAmelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
lowercase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
lowercase_ : int = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 713 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ : Any = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[Any] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ : Dict = logging.get_logger(__name__)
lowercase_ : Tuple = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : Tuple = "conditional_detr"
_UpperCamelCase : List[str] = ["past_key_values"]
_UpperCamelCase : int = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Union[str, Any] , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : List[str]=3_00 , lowerCamelCase_ : int=6 , lowerCamelCase_ : Optional[Any]=20_48 , lowerCamelCase_ : Any=8 , lowerCamelCase_ : Tuple=6 , lowerCamelCase_ : List[str]=20_48 , lowerCamelCase_ : Dict=8 , lowerCamelCase_ : str=0.0 , lowerCamelCase_ : Optional[int]=0.0 , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : Optional[int]="relu" , lowerCamelCase_ : Dict=2_56 , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Union[str, Any]=0.0 , lowerCamelCase_ : str=0.0 , lowerCamelCase_ : List[Any]=0.02 , lowerCamelCase_ : Union[str, Any]=1.0 , lowerCamelCase_ : Union[str, Any]=False , lowerCamelCase_ : str="sine" , lowerCamelCase_ : Dict="resnet50" , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Union[str, Any]=2 , lowerCamelCase_ : Tuple=5 , lowerCamelCase_ : Any=2 , lowerCamelCase_ : Union[str, Any]=1 , lowerCamelCase_ : str=1 , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : Optional[Any]=5 , lowerCamelCase_ : Optional[int]=2 , lowerCamelCase_ : str=0.25 , **lowerCamelCase_ : Dict , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_snake_case : str = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : List[Any] = backbone_config.get('model_type' )
_snake_case : str = CONFIG_MAPPING[backbone_model_type]
_snake_case : str = config_class.from_dict(lowerCamelCase_ )
_snake_case : int = use_timm_backbone
_snake_case : Optional[int] = backbone_config
_snake_case : Tuple = num_channels
_snake_case : List[str] = num_queries
_snake_case : List[str] = d_model
_snake_case : Dict = encoder_ffn_dim
_snake_case : Union[str, Any] = encoder_layers
_snake_case : Union[str, Any] = encoder_attention_heads
_snake_case : Dict = decoder_ffn_dim
_snake_case : Any = decoder_layers
_snake_case : List[str] = decoder_attention_heads
_snake_case : Any = dropout
_snake_case : str = attention_dropout
_snake_case : List[Any] = activation_dropout
_snake_case : List[str] = activation_function
_snake_case : Any = init_std
_snake_case : int = init_xavier_std
_snake_case : str = encoder_layerdrop
_snake_case : Tuple = decoder_layerdrop
_snake_case : Optional[Any] = encoder_layers
_snake_case : Optional[Any] = auxiliary_loss
_snake_case : Any = position_embedding_type
_snake_case : Optional[Any] = backbone
_snake_case : List[Any] = use_pretrained_backbone
_snake_case : List[str] = dilation
# Hungarian matcher
_snake_case : List[Any] = class_cost
_snake_case : int = bbox_cost
_snake_case : Optional[int] = giou_cost
# Loss coefficients
_snake_case : Any = mask_loss_coefficient
_snake_case : str = dice_loss_coefficient
_snake_case : Dict = cls_loss_coefficient
_snake_case : str = bbox_loss_coefficient
_snake_case : Any = giou_loss_coefficient
_snake_case : Optional[Any] = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase_ , **lowerCamelCase_ )
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return self.d_model
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : int = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_snake_case : Optional[int] = self.backbone_config.to_dict()
_snake_case : Union[str, Any] = self.__class__.model_type
return output
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : Optional[int] = version.parse("1.11" )
@property
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return 1e-5
@property
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
return 12
| 714 |
import math
def A__( __lowerCAmelCase ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, even numbers, and multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A__( __lowerCAmelCase = 1_00_01 ):
try:
_snake_case : int = int(__lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError('Parameter nth must be int or castable to int.' ) from None
if nth <= 0:
raise ValueError('Parameter nth must be greater than or equal to one.' )
_snake_case : list[int] = []
_snake_case : List[Any] = 2
while len(__lowerCAmelCase ) < nth:
if is_prime(__lowerCAmelCase ):
primes.append(__lowerCAmelCase )
num += 1
else:
num += 1
return primes[len(__lowerCAmelCase ) - 1]
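# Quick checks (assumed): solution(6) returns 13, the sixth prime in
# 2, 3, 5, 7, 11, 13; the default nth=10001 answers Project Euler problem 7,
# after skipping even numbers and multiples of 3 in the primality test.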
if __name__ == "__main__":
print(F'''{solution() = }''')
| 652 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
lowercase_ : str = False
@skip_mps
class lowercase ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : int = StableDiffusionAttendAndExcitePipeline
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Any = TEXT_TO_IMAGE_PARAMS
_UpperCamelCase : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"} )
_UpperCamelCase : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __UpperCAmelCase ( cls : Dict ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(lowerCamelCase_ )
@classmethod
def __UpperCAmelCase ( cls : int ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(lowerCamelCase_ )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
_snake_case : Optional[int] = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
_snake_case : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_snake_case : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
_snake_case : Optional[int] = CLIPTextModel(lowerCamelCase_ )
_snake_case : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_snake_case : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any]=0 ):
'''simple docstring'''
if str(lowerCamelCase_ ).startswith('mps' ):
_snake_case : str = torch.manual_seed(lowerCamelCase_ )
else:
_snake_case : List[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
_snake_case : Any = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Any = 'cpu'
_snake_case : List[Any] = self.get_dummy_components()
_snake_case : List[Any] = self.pipeline_class(**lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[str] = self.get_dummy_inputs(lowerCamelCase_ )
_snake_case : Tuple = pipe(**lowerCamelCase_ ).images
_snake_case : Union[str, Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
_snake_case : Optional[int] = np.array(
[0.6390_5364, 0.6289_7307, 0.4859_9017, 0.513_3624, 0.555_0048, 0.4576_9516, 0.5032_6973, 0.502_3139, 0.4538_4496] )
_snake_case : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase_ , 1e-3 )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5e-4 )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(lowerCamelCase_ )
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
_snake_case : Dict = torch.manual_seed(51 )
_snake_case : Any = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , safety_checker=lowerCamelCase_ , torch_dtype=torch.floataa )
pipe.to('cuda' )
_snake_case : Union[str, Any] = 'a painting of an elephant with glasses'
_snake_case : Union[str, Any] = [5, 7]
_snake_case : int = pipe(
prompt=lowerCamelCase_ , token_indices=lowerCamelCase_ , guidance_scale=7.5 , generator=lowerCamelCase_ , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
_snake_case : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
assert np.abs((expected_image - image).max() ) < 5e-1
| 715 |
import torch
from transformers import AutoModel
class lowercase ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase_ : Dict="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(lowerCamelCase_ , self ).__init__()
_snake_case : Optional[Any] = AutoModel.from_pretrained(lowerCamelCase_ , return_dict=lowerCamelCase_ )
_snake_case : str = torch.nn.CosineSimilarity(3 , 1e-08 )
_snake_case : str = torch.nn.Softmax(dim=1 )
def __UpperCAmelCase ( self : int , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
return self.bert(**lowerCamelCase_ ).last_hidden_state
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
return token_embeddings.sum(2 , keepdim=lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any]=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(lowerCamelCase_ , lowerCamelCase_ ) )
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = W_supports['sizes'].tolist()
_snake_case : int = W_supports['start_token_id'].item()
_snake_case : List[str] = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_snake_case : Optional[int] = self.BERT(**lowerCamelCase_ )
_snake_case : Optional[Any] = self.BERT(**lowerCamelCase_ )
_snake_case : Optional[int] = None
_snake_case : Optional[int] = None
_snake_case : List[str] = W_supports['input_ids'] == start_token_id
_snake_case : Union[str, Any] = W_supports['input_ids'] == end_token_id
for i, size in enumerate(lowerCamelCase_ ):
if i == 0:
_snake_case : str = 0
else:
_snake_case : Union[str, Any] = support_sizes[i - 1]
_snake_case : Tuple = S[s : s + size][start_token_masks[s : s + size]]
_snake_case : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
_snake_case : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_snake_case : Union[str, Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_snake_case : Optional[Any] = torch.vstack((p_starts, p_start) )
_snake_case : List[str] = torch.vstack((p_ends, p_end) )
else:
_snake_case : Union[str, Any] = p_start
_snake_case : Any = p_end
return p_starts, p_ends
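# Hedged usage sketch (variable names assumed, not part of this file): the
# forward pass expects tokenizer output for the query plus a supports batch
# that additionally carries 'sizes', 'start_token_id' and 'end_token_id', and
# returns start/end probability rows vstacked across the support groups.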
| 652 | 0 |
def A__( __lowerCAmelCase ):
_snake_case : List[Any] = [0] * len(__lowerCAmelCase )
for i in range(1 , len(__lowerCAmelCase ) ):
        # reuse the previous prefix value instead of rescanning (dynamic programming, as in KMP)
_snake_case : Tuple = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
_snake_case : List[str] = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
_snake_case : Union[str, Any] = j
return prefix_result
def A__( __lowerCAmelCase ):
return max(prefix_function(__lowerCAmelCase ) )
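# Worked example (assumed input): for "aabcdaabc" the prefix function is
# [0, 1, 0, 0, 0, 1, 2, 3, 4], so the longest border reported by the second
# helper is 4 (the prefix "aabc" reappearing as a suffix).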
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase ( a_ , a_ , a_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Any , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : float , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : List[str] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Union[str, Any] = False
_snake_case : Tuple = nn.Dropout(p=lowerCamelCase_ )
_snake_case : Union[str, Any] = TaConfig(
vocab_size=lowerCamelCase_ , d_model=lowerCamelCase_ , num_heads=lowerCamelCase_ , d_kv=lowerCamelCase_ , d_ff=lowerCamelCase_ , dropout_rate=lowerCamelCase_ , feed_forward_proj=lowerCamelCase_ , is_decoder=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , )
_snake_case : Union[str, Any] = nn.ModuleList()
for lyr_num in range(lowerCamelCase_ ):
_snake_case : Any = TaBlock(lowerCamelCase_ )
self.encoders.append(lowerCamelCase_ )
_snake_case : Tuple = TaLayerNorm(lowerCamelCase_ )
_snake_case : List[str] = nn.Dropout(p=lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple ):
'''simple docstring'''
_snake_case : Any = self.token_embedder(lowerCamelCase_ )
_snake_case : List[Any] = encoder_input_tokens.shape[1]
_snake_case : Any = torch.arange(lowerCamelCase_ , device=encoder_input_tokens.device )
x += self.position_encoding(lowerCamelCase_ )
_snake_case : Tuple = self.dropout_pre(lowerCamelCase_ )
        # invert the attention mask
_snake_case : Dict = encoder_input_tokens.size()
_snake_case : Optional[int] = self.get_extended_attention_mask(lowerCamelCase_ , lowerCamelCase_ )
for lyr in self.encoders:
_snake_case : str = lyr(lowerCamelCase_ , lowerCamelCase_ )[0]
_snake_case : Any = self.layer_norm(lowerCamelCase_ )
return self.dropout_post(lowerCamelCase_ ), encoder_inputs_mask
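# Shape sketch (assumed): encoder_input_tokens is an int tensor of shape
# (batch, seq); the forward above returns the post-layer-norm, post-dropout
# hidden states of shape (batch, seq, d_model) together with the mask that
# was passed in.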
| 652 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 717 |
def A__( __lowerCAmelCase ):
assert column_title.isupper()
_snake_case : List[Any] = 0
_snake_case : List[str] = len(__lowerCAmelCase ) - 1
_snake_case : Dict = 0
while index >= 0:
_snake_case : List[str] = (ord(column_title[index] ) - 64) * pow(26 , __lowerCAmelCase )
answer += value
power += 1
index -= 1
return answer
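# Examples (assumed): "A" -> 1, "AB" -> 28 and "ZY" -> 701, i.e. base-26
# positional arithmetic with 'A' mapped to 1 rather than 0.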
if __name__ == "__main__":
from doctest import testmod
testmod()
| 652 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def A__( __lowerCAmelCase=None ):
_snake_case : Union[str, Any] = argparse.ArgumentParser(add_help=__lowerCAmelCase , allow_abbrev=__lowerCAmelCase )
# The main config parser
_snake_case : Optional[int] = config_command_parser(__lowerCAmelCase )
# The subparser to add commands to
_snake_case : Dict = config_parser.add_subparsers(title='subcommands' , dest='subcommand' )
# Then add other parsers with the parent parser
default_command_parser(__lowerCAmelCase , parents=[parent_parser] )
update_command_parser(__lowerCAmelCase , parents=[parent_parser] )
return config_parser
def A__( ):
_snake_case : Optional[Any] = get_config_parser()
_snake_case : List[str] = config_parser.parse_args()
if not hasattr(__lowerCAmelCase , 'func' ):
config_parser.print_help()
exit(1 )
# Run
args.func(__lowerCAmelCase )
if __name__ == "__main__":
main()
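# Typical invocations (assumed CLI wiring): `accelerate config` runs the
# interactive flow from the main parser, while the `default` and `update`
# subcommands registered above write or patch the config file non-interactively.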
| 718 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
lowercase_ : List[str] = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
lowercase_ : Optional[int] = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
lowercase_ : Any = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
_snake_case : str = len(references[0] )
if any(len(lowerCamelCase_ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
_snake_case : int = [[refs[i] for refs in references] for i in range(lowerCamelCase_ )]
_snake_case : Optional[int] = TER(
normalized=lowerCamelCase_ , no_punct=lowerCamelCase_ , asian_support=lowerCamelCase_ , case_sensitive=lowerCamelCase_ , )
_snake_case : Optional[Any] = sb_ter.corpus_score(lowerCamelCase_ , lowerCamelCase_ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 652 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowercase_ : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : List[str] = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowercase_ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 719 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowercase_ : Optional[int] = object()
# For specifying empty leaf dict `{}`
lowercase_ : List[Any] = object()
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[Any] = tuple((re.compile(x + '$' ) for x in qs) )
for i in range(len(__lowerCAmelCase ) - len(__lowerCAmelCase ) + 1 ):
_snake_case : Tuple = [x.match(__lowerCAmelCase ) for x, y in zip(__lowerCAmelCase , ks[i:] )]
if matches and all(__lowerCAmelCase ):
return True
return False
def A__( __lowerCAmelCase ):
def replace(__lowerCAmelCase , __lowerCAmelCase ):
for rule, replacement in rules:
if _match(__lowerCAmelCase , __lowerCAmelCase ):
return replacement
return val
return replace
def A__( ):
return [
# embeddings
(("transformer", "wpe", "embedding"), P('mp' , __lowerCAmelCase )),
(("transformer", "wte", "embedding"), P('mp' , __lowerCAmelCase )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCAmelCase , 'mp' )),
(("attention", "out_proj", "kernel"), P('mp' , __lowerCAmelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__lowerCAmelCase , 'mp' )),
(("mlp", "c_fc", "bias"), P('mp' )),
(("mlp", "c_proj", "kernel"), P('mp' , __lowerCAmelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def A__( __lowerCAmelCase ):
_snake_case : Optional[Any] = _get_partition_rules()
_snake_case : Optional[Any] = _replacement_rules(__lowerCAmelCase )
_snake_case : str = {k: _unmatched for k in flatten_dict(__lowerCAmelCase )}
_snake_case : str = {k: replace(__lowerCAmelCase , __lowerCAmelCase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__lowerCAmelCase ) )
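# Minimal sketch of applying the rules (names assumed): given a flax params
# tree, the helper above flattens it, assigns each leaf the PartitionSpec of
# the first matching rule (e.g. attention projections shard on 'mp'), and the
# assertion fails if any leaf path matches no rule at all.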
| 652 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : List[Any] = ["image_processor", "tokenizer"]
_UpperCamelCase : List[str] = "Pix2StructImageProcessor"
_UpperCamelCase : int = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = False
super().__init__(lowerCamelCase_ , lowerCamelCase_ )
def __call__( self : Union[str, Any] , lowerCamelCase_ : Any=None , lowerCamelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase_ : bool = True , lowerCamelCase_ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase_ : Union[bool, str, TruncationStrategy] = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[int] = 20_48 , lowerCamelCase_ : int = 0 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Union[str, TensorType]] = None , **lowerCamelCase_ : Dict , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None and not self.image_processor.is_vqa:
_snake_case : str = self.tokenizer
_snake_case : Dict = self.tokenizer(
text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , stride=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , return_special_tokens_mask=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , return_length=lowerCamelCase_ , verbose=lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
_snake_case : Optional[int] = self.image_processor(
lowerCamelCase_ , return_tensors=lowerCamelCase_ , max_patches=lowerCamelCase_ , **lowerCamelCase_ )
else:
# add pixel_values and bbox
_snake_case : List[Any] = self.image_processor(
lowerCamelCase_ , return_tensors=lowerCamelCase_ , max_patches=lowerCamelCase_ , header_text=lowerCamelCase_ , **lowerCamelCase_ )
if text is not None and not self.image_processor.is_vqa:
_snake_case : Dict = self.tokenizer(
text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , stride=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , return_special_tokens_mask=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , return_length=lowerCamelCase_ , verbose=lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ , )
if "attention_mask" in text_encoding:
_snake_case : Tuple = text_encoding.pop('attention_mask' )
if "input_ids" in text_encoding:
_snake_case : Tuple = text_encoding.pop('input_ids' )
else:
_snake_case : Any = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase_ )
return encoding_image_processor
def __UpperCAmelCase ( self : Optional[Any] , *lowerCamelCase_ : Dict , **lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ )
def __UpperCAmelCase ( self : List[str] , *lowerCamelCase_ : Any , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ )
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer.model_input_names
_snake_case : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
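# Hedged usage sketch (checkpoint name assumed): instantiating the processor
# from a Pix2Struct checkpoint and calling it with images plus text returns
# the image processor's patch features merged with the tokenized text, whose
# mask and ids are repurposed as decoder inputs in the non-VQA branch above.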
| 720 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowercase_ : Any = logging.getLogger(__name__)
def A__( __lowerCAmelCase , __lowerCAmelCase ):
    # save the model, removing any stale config/weight files first
if os.path.exists(__lowerCAmelCase ):
if os.path.exists(os.path.join(__lowerCAmelCase , 'config.json' ) ) and os.path.isfile(
os.path.join(__lowerCAmelCase , 'config.json' ) ):
os.remove(os.path.join(__lowerCAmelCase , 'config.json' ) )
if os.path.exists(os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) ):
os.remove(os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) )
else:
os.makedirs(__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
def A__( __lowerCAmelCase , __lowerCAmelCase=False ):
_snake_case : int = 2
if unlogit:
_snake_case : Dict = torch.pow(__lowerCAmelCase , __lowerCAmelCase )
_snake_case : Optional[int] = p * torch.log(__lowerCAmelCase )
_snake_case : List[str] = 0
return -plogp.sum(dim=-1 )
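# Sanity check (illustrative): a uniform attention row [0.25, 0.25, 0.25, 0.25]
# has entropy ln(4) ~= 1.386, while a one-hot row scores 0 once the 0*log(0)
# terms are zeroed out.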
def A__( __lowerCAmelCase ):
logger.info('lv, h >\t' + '\t'.join(F'''{x + 1}''' for x in range(len(__lowerCAmelCase ) ) ) )
for row in range(len(__lowerCAmelCase ) ):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=False ):
_snake_case , _snake_case : List[str] = model.config.num_hidden_layers, model.config.num_attention_heads
_snake_case : Optional[int] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
_snake_case : Union[str, Any] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
if head_mask is None:
_snake_case : List[str] = torch.ones(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=__lowerCAmelCase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
_snake_case : List[str] = None
_snake_case : str = 0.0
_snake_case : List[str] = 0.0
for step, inputs in enumerate(tqdm(__lowerCAmelCase , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
_snake_case : Dict = tuple(t.to(args.device ) for t in inputs )
((_snake_case) , ) : int = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_snake_case : List[str] = model(__lowerCAmelCase , labels=__lowerCAmelCase , head_mask=__lowerCAmelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_snake_case , _snake_case , _snake_case : Dict = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__lowerCAmelCase ):
_snake_case : int = entropy(attn.detach() , __lowerCAmelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__lowerCAmelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_snake_case : List[str] = 2
_snake_case : Any = torch.pow(torch.pow(__lowerCAmelCase , __lowerCAmelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
_snake_case : Union[str, Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(__lowerCAmelCase )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(__lowerCAmelCase )
logger.info('Head ranked by importance scores' )
_snake_case : Tuple = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_snake_case : Optional[Any] = torch.arange(
head_importance.numel() , device=args.device )
_snake_case : Union[str, Any] = head_ranks.view_as(__lowerCAmelCase )
print_ad_tensor(__lowerCAmelCase )
return attn_entropy, head_importance, total_loss
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case , _snake_case , _snake_case : int = compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase )
_snake_case : List[str] = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , __lowerCAmelCase , original_score * args.masking_threshold )
_snake_case : Optional[Any] = torch.ones_like(__lowerCAmelCase )
_snake_case : Tuple = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_snake_case : Dict = original_score
while current_score >= original_score * args.masking_threshold:
_snake_case : List[str] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_snake_case : Optional[Any] = float('Inf' )
_snake_case : Optional[int] = head_importance.view(-1 ).sort()[1]
if len(__lowerCAmelCase ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
_snake_case : List[Any] = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
_snake_case : Tuple = new_head_mask.view(-1 )
_snake_case : List[str] = 0.0
_snake_case : int = new_head_mask.view_as(__lowerCAmelCase )
_snake_case : Optional[Any] = new_head_mask.clone().detach()
print_ad_tensor(__lowerCAmelCase )
# Compute metric and head importance again
_snake_case , _snake_case , _snake_case : List[str] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_snake_case : Union[str, Any] = 1 / loss
logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percent)' , __lowerCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('Final head mask' )
print_ad_tensor(__lowerCAmelCase )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
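# Intuition (illustrative numbers): with masking_amount=0.1 and
# masking_threshold=0.9, each pass zeroes the ~10% least-important remaining
# heads and re-scores; masking stops once 1/loss falls below 90% of the
# unmasked score, and the last mask that stayed above the threshold is saved.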
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[Any] = datetime.now()
_snake_case , _snake_case , _snake_case : List[Any] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_snake_case : str = 1 / loss
_snake_case : Optional[int] = datetime.now() - before_time
_snake_case : Optional[int] = sum(p.numel() for p in model.parameters() )
_snake_case : Optional[Any] = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowerCAmelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[int] = [
v,
]
assert sum(len(__lowerCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__lowerCAmelCase )
_snake_case : Optional[Any] = sum(p.numel() for p in model.parameters() )
_snake_case : List[str] = datetime.now()
_snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase , actually_pruned=__lowerCAmelCase , )
_snake_case : Dict = 1 / loss
_snake_case : str = datetime.now() - before_time
logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)' , __lowerCAmelCase , __lowerCAmelCase , pruned_num_params / original_num_params * 1_00 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , __lowerCAmelCase , __lowerCAmelCase )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percent' , original_time / new_time * 1_00 )
save_model(__lowerCAmelCase , args.output_dir )
def A__( ):
_snake_case : int = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '--data_dir' , default=None , type=str , required=True , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
    parser.add_argument(
        '--model_name_or_path' , default=None , type=str , required=True , help='Path to pretrained model or model identifier from huggingface.co/models' , )
    parser.add_argument(
        '--output_dir' , default=None , type=str , required=True , help='The output directory where the model predictions and checkpoints will be written.' , )
    # Other parameters
    parser.add_argument(
        '--config_name' , default='' , type=str , help='Pretrained config name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--tokenizer_name' , default='' , type=str , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--cache_dir' , default=None , type=str , help='Where do you want to store the pre-trained models downloaded from s3' , )
    parser.add_argument(
        '--data_subset' , type=int , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
    parser.add_argument(
        '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
    parser.add_argument(
        '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
    parser.add_argument(
        '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
    parser.add_argument(
        '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
    parser.add_argument(
        '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
    parser.add_argument(
        '--masking_threshold' , default=0.9 , type=float , help='masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).' , )
    parser.add_argument(
        '--masking_amount' , default=0.1 , type=float , help='Amount of heads to mask at each masking step.' )
    parser.add_argument('--metric_name' , default='acc' , type=str , help='Metric to use for head masking.' )
    parser.add_argument(
        '--max_seq_length' , default=1_28 , type=int , help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ) , )
    parser.add_argument('--batch_size' , default=1 , type=int , help='Batch size.' )
    parser.add_argument('--seed' , type=int , default=42 )
    parser.add_argument('--local_rank' , type=int , default=-1 , help='local_rank for distributed training on gpus' )
    parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
    parser.add_argument('--server_ip' , type=str , default='' , help='Can be used for distant debugging.' )
    parser.add_argument('--server_port' , type=str , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
        args.device = torch.device('cuda' , args.local_rank )
        args.n_gpu = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
elif args.n_gpu > 1:
        model = nn.DataParallel(model )
# Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , 'run_args.bin' ) )
    logger.info('Training/evaluation parameters %s' , args )
# Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64 ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
# Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )
if __name__ == "__main__":
main()
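# Usage sketch (added for reference, not part of the original script): the core
# `prune_heads` API the pruning step builds on, shown standalone with the
# upstream class name `GPT2LMHeadModel`. The checkpoint and the layer/head
# indices are illustrative assumptions. Defined but never called, so module
# behavior is unchanged.
def _prune_heads_api_sketch():
    import torch
    from transformers import GPT2LMHeadModel

    model = GPT2LMHeadModel.from_pretrained('gpt2')
    print(f'params before pruning: {model.num_parameters():.2e}')
    # Map layer index -> list of attention-head indices to remove in that layer.
    model.prune_heads({0: [0, 1], 5: [3]})
    print(f'params after pruning: {model.num_parameters():.2e}')
    # A pruned model still supports a normal forward pass.
    with torch.no_grad():
        logits = model(torch.tensor([[50_256]])).logits
    print(logits.shape)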
| 652 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester ( ConfigTester ):
"""simple docstring"""
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase_ , 'tf_padding' ) )
self.parent.assertTrue(hasattr(lowerCamelCase_ , 'depth_multiplier' ) )
class MobileNetVaModelTester :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=13 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : Optional[Any]=0.25 , lowerCamelCase_ : str=8 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : int=10_24 , lowerCamelCase_ : Optional[Any]=32 , lowerCamelCase_ : str="relu6" , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Union[str, Any]=0.02 , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : int=True , lowerCamelCase_ : Dict=10 , lowerCamelCase_ : str=None , ):
'''simple docstring'''
_snake_case : List[Any] = parent
_snake_case : int = batch_size
_snake_case : Optional[Any] = num_channels
_snake_case : Optional[Any] = image_size
_snake_case : Optional[Any] = depth_multiplier
_snake_case : Tuple = min_depth
_snake_case : Any = tf_padding
_snake_case : Optional[Any] = int(last_hidden_size * depth_multiplier )
_snake_case : Any = output_stride
_snake_case : str = hidden_act
_snake_case : Dict = classifier_dropout_prob
_snake_case : Union[str, Any] = use_labels
_snake_case : Dict = is_training
_snake_case : str = num_labels
_snake_case : Union[str, Any] = initializer_range
_snake_case : Optional[int] = scope
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Optional[Any] = None
_snake_case : Dict = None
if self.use_labels:
_snake_case : List[str] = ids_tensor([self.batch_size] , self.num_labels )
_snake_case : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_snake_case : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = MobileNetVaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_snake_case : List[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : List[str] ):
'''simple docstring'''
_snake_case : List[Any] = self.num_labels
_snake_case : Tuple = MobileNetVaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_snake_case : Tuple = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Optional[int] = self.prepare_config_and_inputs()
_snake_case : Dict = config_and_inputs
_snake_case : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : int = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
_UpperCamelCase : str = (
{"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Tuple = False
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : int = MobileNetVaModelTester(self )
_snake_case : str = MobileNetVaConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV1 does not use inputs_embeds' )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV1 does not support input and output embeddings' )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV1 does not output attentions' )
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Optional[Any] = model_class(lowerCamelCase_ )
_snake_case : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Any = [*signature.parameters.keys()]
_snake_case : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[str] ):
_snake_case : Dict = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
_snake_case : List[str] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
_snake_case : str = outputs.hidden_states
_snake_case : Tuple = 26
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
_snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Tuple = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : List[str] = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Dict = MobileNetVaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__( ):
_snake_case : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224' ) if is_vision_available() else None
)
@slow
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : int = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224' ).to(lowerCamelCase_ )
_snake_case : List[str] = self.default_image_processor
_snake_case : Optional[int] = prepare_img()
_snake_case : Optional[Any] = image_processor(images=lowerCamelCase_ , return_tensors='pt' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**lowerCamelCase_ )
# verify the logits
_snake_case : Any = torch.Size((1, 10_01) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
_snake_case : List[str] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) )
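# Usage sketch (added for reference, not part of the test file): running the
# integration checkpoint directly, using the upstream `transformers` class
# names rather than the renamed ones above. Requires Hub access; defined but
# never called.
def _mobilenet_v1_inference_sketch():
    import torch
    from PIL import Image
    from transformers import MobileNetV1ForImageClassification, MobileNetV1ImageProcessor

    processor = MobileNetV1ImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224')
    model = MobileNetV1ForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224')
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    inputs = processor(images=image, return_tensors='pt')
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1001): ImageNet classes + background
    print(model.config.id2label[int(logits.argmax(-1))])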
| 721 |
def A__( __lowerCAmelCase ):
    if not isinstance(__lowerCAmelCase , int ):
        raise TypeError('only integers accepted as input' )
    else:
        num_string = str(abs(__lowerCAmelCase ) )
        num_transpositions = [list(num_string ) for _ in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int(''.join(transposition ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
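# Quick sanity checks for the helper above, worked by hand: dropping one digit
# of 2736 yields 736, 236, 276 or 273, so the maximum is 736; for 1234 the
# candidates are 234, 134, 124 and 123, so the maximum is 234.
assert A__(2736) == 736
assert A__(1234) == 234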
| 652 | 0 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase ( a_ , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : Tuple = DDIMPipeline
_UpperCamelCase : int = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_UpperCamelCase : int = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
_UpperCamelCase : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : List[str] = False
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : List[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
_snake_case : List[Any] = DDIMScheduler()
_snake_case : Union[str, Any] = {'unet': unet, 'scheduler': scheduler}
return components
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str]=0 ):
'''simple docstring'''
if str(lowerCamelCase_ ).startswith('mps' ):
_snake_case : str = torch.manual_seed(lowerCamelCase_ )
else:
_snake_case : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
_snake_case : Union[str, Any] = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = 'cpu'
_snake_case : List[Any] = self.get_dummy_components()
_snake_case : int = self.pipeline_class(**lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Dict = self.get_dummy_inputs(lowerCamelCase_ )
_snake_case : Optional[Any] = pipe(**lowerCamelCase_ ).images
_snake_case : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
_snake_case : int = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
_snake_case : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase_ , 1e-3 )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3e-3 )
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = 'google/ddpm-cifar10-32'
_snake_case : Any = UNetaDModel.from_pretrained(lowerCamelCase_ )
_snake_case : List[Any] = DDIMScheduler()
_snake_case : List[str] = DDIMPipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
ddim.to(lowerCamelCase_ )
ddim.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[str] = torch.manual_seed(0 )
_snake_case : List[Any] = ddim(generator=lowerCamelCase_ , eta=0.0 , output_type='numpy' ).images
_snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_snake_case : Union[str, Any] = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : Optional[Any] = 'google/ddpm-ema-bedroom-256'
_snake_case : Union[str, Any] = UNetaDModel.from_pretrained(lowerCamelCase_ )
_snake_case : Optional[int] = DDIMScheduler.from_pretrained(lowerCamelCase_ )
_snake_case : str = DDIMPipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
ddpm.to(lowerCamelCase_ )
ddpm.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Union[str, Any] = torch.manual_seed(0 )
_snake_case : Optional[Any] = ddpm(generator=lowerCamelCase_ , output_type='numpy' ).images
_snake_case : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_snake_case : Dict = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
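# Usage sketch (added for reference, not part of the tests): sampling with the
# same unet + DDIMScheduler combination as the first slow test above, using the
# upstream class name `UNet2DModel`. Requires Hub access; defined but never
# called.
def _ddim_sampling_sketch():
    import torch
    from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel

    unet = UNet2DModel.from_pretrained('google/ddpm-cifar10-32')
    pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
    pipe.to('cuda' if torch.cuda.is_available() else 'cpu')
    generator = torch.manual_seed(0)
    image = pipe(generator=generator, eta=0.0, num_inference_steps=50).images[0]
    image.save('ddim_sample.png')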
| 700 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowercase_ : Tuple = logging.getLogger(__name__)
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Dict=-1 ):
'''simple docstring'''
_snake_case : str = label_idx
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : Union[str, Any] = mode.value
_snake_case : Optional[int] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Dict = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
_snake_case : List[Any] = []
_snake_case : int = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
_snake_case : Optional[Any] = []
_snake_case : Union[str, Any] = []
else:
_snake_case : Tuple = line.split(' ' )
words.append(splits[0] )
if len(lowerCamelCase_ ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
return examples
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
_snake_case : str = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(lowerCamelCase_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_snake_case : List[str] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(lowerCamelCase_ )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : Optional[int] = f.read().splitlines()
if "O" not in labels:
_snake_case : Optional[int] = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Optional[int] ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : str = f.read().splitlines()
if "O" not in labels:
_snake_case : Union[str, Any] = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class lowercase ( a_ ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : str = mode.value
_snake_case : List[str] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Tuple = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : List[str] = []
_snake_case : str = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
return examples
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
_snake_case : Dict = 0
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : Optional[int] = preds_list[example_id]
_snake_case : List[Any] = ''
for token in sentence:
out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(lowerCamelCase_ )
example_id += 1
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
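# Format note (illustrative helper, not part of the module): the NER/chunking
# tasks above parse whitespace-separated CoNLL-style files with one token per
# line, the label in the indexed column, and blank lines between sentences.
# The sample content below is a hypothetical example; the helper is never
# called by this module.
def _write_tiny_conll_sample(path='train.txt'):
    sample = (
        'EU B-ORG\n'
        'rejects O\n'
        'German B-MISC\n'
        'call O\n'
        '\n'
        'Peter B-PER\n'
        'Blackburn I-PER\n'
    )
    with open(path, 'w', encoding='utf-8') as f:
        f.write(sample)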
| 652 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowercase_ : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def A__( path , tmp_path ):
    inspect_dataset(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def A__( path , tmp_path ):
    inspect_metric(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def A__( path , config_name , expected_splits ):
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def A__( path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def A__( path , expected ):
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def A__( path , expected_configs , expected_splits_in_first_config ):
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def A__( path , expected_config , expected_splits ):
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def A__( path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
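# Usage sketch (added for reference): the same inspection helpers called
# directly, outside pytest. Requires network access to the Hugging Face Hub;
# defined but never called.
def _dataset_inspection_sketch():
    config_names = get_dataset_config_names('squad')
    print(config_names)  # e.g. ['plain_text']
    print(get_dataset_split_names('squad', 'plain_text'))  # e.g. ['train', 'validation']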
| 701 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = 1
_snake_case : str = 3
_snake_case : List[str] = (32, 32)
_snake_case : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(lowerCamelCase_ )
@property
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
def extract(*lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : str ):
class lowercase :
"""simple docstring"""
def __init__( self : Tuple ):
'''simple docstring'''
_snake_case : List[str] = torch.ones([0] )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : Tuple ):
'''simple docstring'''
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : int = self.dummy_cond_unet
_snake_case : str = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
_snake_case : Union[str, Any] = self.dummy_vae
_snake_case : Optional[Any] = self.dummy_text_encoder
_snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
_snake_case : Union[str, Any] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[str] = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_snake_case : Union[str, Any] = output.images
_snake_case : List[str] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_snake_case : Tuple = image[0, -3:, -3:, -1]
_snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : List[str] = self.dummy_cond_unet
_snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_snake_case : int = self.dummy_vae
_snake_case : List[Any] = self.dummy_text_encoder
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
_snake_case : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : Union[str, Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : str = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Tuple = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_snake_case : Optional[Any] = output.images
_snake_case : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert isinstance(pipe.scheduler , lowerCamelCase_ )
assert pipe.safety_checker is None
_snake_case : Dict = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
_snake_case : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_snake_case : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.dummy_cond_unet
_snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_snake_case : Any = self.dummy_vae
_snake_case : Optional[Any] = self.dummy_text_encoder
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
_snake_case : str = unet.half()
_snake_case : Union[str, Any] = vae.half()
_snake_case : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_snake_case : List[str] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Tuple = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Optional[int] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
_snake_case : List[str] = 40_03_66_03_46
_snake_case : int = 7
# without safety guidance (sld_guidance_scale = 0)
_snake_case : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Union[str, Any] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : str = output.images
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
_snake_case : Tuple = torch.manual_seed(lowerCamelCase_ )
_snake_case : int = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : Tuple = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : str = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Union[str, Any] = 'padme amidala taking a bath artwork, safe for work, no nudity'
_snake_case : Optional[Any] = 27_34_97_17_55
_snake_case : Union[str, Any] = 7
_snake_case : Dict = torch.manual_seed(lowerCamelCase_ )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Any = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
_snake_case : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
_snake_case : Optional[int] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[Any] = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
_snake_case : Union[str, Any] = 10_44_35_52_34
_snake_case : Dict = 12
_snake_case : Optional[int] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Optional[int] = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
_snake_case : List[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Optional[int] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
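# Usage sketch (added for reference, not part of the tests): the Safe Latent
# Diffusion knobs exercised above, on the same checkpoint. The prompt and
# values are illustrative; weights are downloaded from the Hub. Defined but
# never called.
def _safe_latent_diffusion_sketch():
    import torch
    from diffusers import StableDiffusionPipelineSafe

    pipe = StableDiffusionPipelineSafe.from_pretrained('runwayml/stable-diffusion-v1-5')
    pipe.to('cuda' if torch.cuda.is_available() else 'cpu')
    image = pipe(
        'portrait photo of a hiker on a mountain trail',
        generator=torch.manual_seed(0),
        num_inference_steps=50,
        sld_guidance_scale=2000,
        sld_warmup_steps=7,
        sld_threshold=0.025,
        sld_momentum_scale=0.5,
        sld_mom_beta=0.7,
    ).images[0]
    image.save('sld_sample.png')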
| 652 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
lowercase_ : str = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class TaConfig ( PretrainedConfig ):
    """simple docstring"""

    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__( self , vocab_size=3_21_28 , d_model=5_12 , d_kv=64 , d_ff=20_48 , num_layers=6 , num_decoder_layers=None , num_heads=8 , relative_attention_num_buckets=32 , relative_attention_max_distance=1_28 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split('-' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
class TaOnnxConfig ( OnnxSeqaSeqConfigWithPast ):
    """simple docstring"""

    @property
    def inputs( self ):
        '''simple docstring'''
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        return common_inputs

    @property
    def default_onnx_opset( self ):
        '''simple docstring'''
        return 13
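# Quick demonstration (added for reference) of the gated-activation parsing in
# the __init__ above, via the upstream class name `T5Config`. Defined but
# never called.
def _t5_gated_act_sketch():
    from transformers import T5Config

    cfg = T5Config(feed_forward_proj='gated-gelu')
    # 'gated-gelu' is remapped to the 'gelu_new' activation with gating enabled.
    print(cfg.dense_act_fn, cfg.is_gated_act)  # -> gelu_new True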
| 702 |
import functools
def A__( days , costs ):
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 3_66:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )

    @functools.cache
    def dynamic_programming(index ) -> int:
        if index > 3_65:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )

    return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
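# Worked example (hand-checked, LeetCode-983 style): with passes costing
# 2/7/15 for 1/7/30 days and travel days [1, 4, 6, 7, 8, 20], the optimum is a
# 1-day pass on day 1, a 7-day pass covering days 4-10, and a 1-day pass on
# day 20, for a total cost of 11.
assert A__([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11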
| 652 | 0 |
import os
from datetime import datetime as dt
from github import Github
lowercase_ : Optional[int] = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def A__( ):
_snake_case : str = Github(os.environ['GITHUB_TOKEN'] )
_snake_case : Any = g.get_repo('huggingface/diffusers' )
_snake_case : List[Any] = repo.get_issues(state='open' )
for issue in open_issues:
_snake_case : Optional[int] = sorted(issue.get_comments() , key=lambda __lowerCAmelCase : i.created_at , reverse=__lowerCAmelCase )
_snake_case : Tuple = comments[0] if len(__lowerCAmelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
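# Usage sketch (added for reference): the basic PyGithub calls the bot above is
# built from, listing a few open issues with their labels. Assumes a token in
# GITHUB_TOKEN; defined but never called.
def _list_open_issues_sketch():
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    for issue in repo.get_issues(state='open')[:5]:
        print(issue.number, issue.title, [label.name for label in issue.get_labels()])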
| 703 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase_ : str = logging.get_logger(__name__)
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : int , *lowerCamelCase_ : str , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.' , FutureWarning , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
| 652 | 0 |
class lowercase : # Public class to implement a graph
"""simple docstring"""
    def __init__( self , row : int , col : int , graph : list[list[bool]] ):
        '''simple docstring'''
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe( self , i : int , j : int , visited : list[list[bool]] ):
        '''simple docstring'''
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs( self , i : int , j : int , visited : list[list[bool]] ):
        '''simple docstring'''
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )

    def count_islands( self ):  # And finally, count all islands.
        '''simple docstring'''
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
                    count += 1
        return count
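# Quick check (hand-verified) of the 8-connected island counter above: the
# grid below contains five separate islands.
_graph = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
assert lowercase(5, 5, _graph).count_islands() == 5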
| 704 |
from math import factorial
def binomial_distribution( successes , trials , prob ):
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials' )
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers' )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError('the function is defined for non-negative integers' )
    if not 0 < prob < 1:
        raise ValueError('prob has to be in the range 0 - 1' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 652 | 0 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : List[str]=7 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : List[str]=18 , lowerCamelCase_ : str=30 , lowerCamelCase_ : Optional[Any]=4_00 , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Dict=True , lowerCamelCase_ : int=None , lowerCamelCase_ : int=True , lowerCamelCase_ : Dict=[0.5, 0.5, 0.5] , lowerCamelCase_ : Optional[int]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Optional[Any]=False , ):
'''simple docstring'''
_snake_case : str = size if size is not None else {'height': 20, 'width': 20}
_snake_case : Optional[int] = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_snake_case : List[Any] = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : List[Any] = num_channels
_snake_case : Optional[int] = image_size
_snake_case : Any = min_resolution
_snake_case : str = max_resolution
_snake_case : int = do_resize
_snake_case : Tuple = size
_snake_case : Union[str, Any] = do_center_crop
_snake_case : int = crop_size
_snake_case : Optional[int] = do_normalize
_snake_case : Tuple = image_mean
_snake_case : Any = image_std
_snake_case : Union[str, Any] = do_reduce_labels
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def A__( ):
    dataset = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
    image = Image.open(dataset[0]['file'] )
    map = Image.open(dataset[1]['file'] )
    return image, map
def A__( ):
    ds = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
    image1 = Image.open(ds[0]['file'] )
    map1 = Image.open(ds[1]['file'] )
    image2 = Image.open(ds[2]['file'] )
    map2 = Image.open(ds[3]['file'] )
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class lowercase ( a_ , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : str = BeitImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
_snake_case : int = BeitImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(lowerCamelCase_ , 'size' ) )
self.assertTrue(hasattr(lowerCamelCase_ , 'do_center_crop' ) )
self.assertTrue(hasattr(lowerCamelCase_ , 'center_crop' ) )
self.assertTrue(hasattr(lowerCamelCase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowerCamelCase_ , 'image_mean' ) )
self.assertTrue(hasattr(lowerCamelCase_ , 'image_std' ) )
    def __UpperCAmelCase ( self : List[Any] ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 20, 'width': 20} )
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
        self.assertEqual(image_processor.do_reduce_labels , False )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=True )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
        self.assertEqual(image_processor.do_reduce_labels , True )
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
pass
    def __UpperCAmelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def __UpperCAmelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def __UpperCAmelCase ( self : Tuple ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def __UpperCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
            maps.append(torch.zeros(image.shape[-2:] ).long() )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , maps[0] , return_tensors='pt' )
        self.assertEqual(
            encoding['pixel_values'].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        self.assertEqual(
            encoding['labels'].shape , (
                1,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        self.assertEqual(encoding['labels'].dtype , torch.long )
        self.assertTrue(encoding['labels'].min().item() >= 0 )
        self.assertTrue(encoding['labels'].max().item() <= 2_55 )
        # Test batched
        encoding = image_processing(image_inputs , maps , return_tensors='pt' )
        self.assertEqual(
            encoding['pixel_values'].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        self.assertEqual(
            encoding['labels'].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        self.assertEqual(encoding['labels'].dtype , torch.long )
        self.assertTrue(encoding['labels'].min().item() >= 0 )
        self.assertTrue(encoding['labels'].max().item() <= 2_55 )
        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image , segmentation_map , return_tensors='pt' )
        self.assertEqual(
            encoding['pixel_values'].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        self.assertEqual(
            encoding['labels'].shape , (
                1,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        self.assertEqual(encoding['labels'].dtype , torch.long )
        self.assertTrue(encoding['labels'].min().item() >= 0 )
        self.assertTrue(encoding['labels'].max().item() <= 2_55 )
        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images , segmentation_maps , return_tensors='pt' )
        self.assertEqual(
            encoding['pixel_values'].shape , (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        self.assertEqual(
            encoding['labels'].shape , (
                2,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        self.assertEqual(encoding['labels'].dtype , torch.long )
        self.assertTrue(encoding['labels'].min().item() >= 0 )
        self.assertTrue(encoding['labels'].max().item() <= 2_55 )
    def __UpperCAmelCase ( self : Dict ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image , map , return_tensors='pt' )
        self.assertTrue(encoding['labels'].min().item() >= 0 )
        self.assertTrue(encoding['labels'].max().item() <= 1_50 )
        # now reduce the labels, so the background (0) gets mapped to 255
        image_processing.do_reduce_labels = True
        encoding = image_processing(image , map , return_tensors='pt' )
        self.assertTrue(encoding['labels'].min().item() >= 0 )
        self.assertTrue(encoding['labels'].max().item() <= 2_55 )
| 705 |
lowercase_ : Tuple = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ : str = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 652 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
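# Project Euler problem 85: an a x b grid contains T(a) * T(b) rectangles, where T is the
# triangle-number function; find the area of the grid whose count is closest to two million.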
def solution(target = 2_00_00_00 ):
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(F'''{solution() = }''')
| 706 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_roc_bert'''] = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass  # RoCBert has no fast tokenizer, so there is nothing tokenizers-specific to import
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 0 |
lowercase_ : Tuple = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ : str = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 707 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mega'''] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 0 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( SchedulerCommonTest ):
"""simple docstring"""
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            'num_train_timesteps': 10_00,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }
        config.update(**kwargs )
        return config
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for time_step in [0, 5_00, 9_99]:
for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='fixed_small_log' )
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000e-10 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.054_9625 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.999_4987 ) ) < 1e-5
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='learned_range' )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.171_2790 < 1e-5
        assert scheduler._get_variance(4_87 , predicted_variance=predicted_variance ) - -5.799_8052 < 1e-5
        assert scheduler._get_variance(9_99 , predicted_variance=predicted_variance ) - -0.001_0011 < 1e-5
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
        assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
        assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
pass
| 708 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def test_inspect_dataset(path , tmp_path ):
    inspect_dataset(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def test_inspect_metric(path , tmp_path ):
    inspect_metric(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_config_info(path , config_name , expected_splits ):
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_config_info_error(path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def test_get_dataset_config_names(path , expected ):
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def test_get_dataset_infos(path , expected_configs , expected_splits_in_first_config ):
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_info(path , expected_config , expected_splits ):
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_split_names_error(path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 652 | 0 |
'''simple docstring'''
import itertools
import math
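# Project Euler problem 7 (generator variant): lazily yield primes via 6k +/- 1 trial division
# and slice out the nth one.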
def is_prime(number ):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution(nth = 1_00_01 ):
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 709 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 652 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
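# Normalizes the input into a batched tensor in [-1, 1]: tensors pass through, PIL images are transformed.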
def preprocess(image ):
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    image = [trans(img.convert('RGB' ) ) for img in image]
    image = torch.stack(image )
    return image
class lowercase ( DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , unet , scheduler ):
        '''simple docstring'''
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
    def check_inputs( self , strength ):
        '''simple docstring'''
        if strength < 0 or strength > 1:
            raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' )
    def get_timesteps( self , num_inference_steps , strength , device ):
        '''simple docstring'''
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents( self , image , timestep , batch_size , dtype , device , generator=None ):
        '''simple docstring'''
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}''' )
        init_latents = image.to(device=device , dtype=dtype )
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        print('add noise to latents at timestep' , timestep )
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
@torch.no_grad()
    def __call__( self , image: Union[torch.FloatTensor, PIL.Image.Image] = None , strength: float = 0.8 , batch_size: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta: float = 0.0 , num_inference_steps: int = 50 , use_clipped_model_output: Optional[bool] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        '''simple docstring'''
        # 1. Check inputs
        self.check_inputs(strength )
        # 2. Preprocess image
        image = preprocess(image )
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps , strength , self.device )
        latent_timestep = timesteps[:1].repeat(batch_size )
        # 4. Prepare latent variables
        latents = self.prepare_latents(image , latent_timestep , batch_size , self.unet.dtype , self.device , generator )
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator , ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image )
| 710 |
import itertools
import math
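# Project Euler problem 7 (generator variant): lazily yield primes via 6k +/- 1 trial division
# and slice out the nth one.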
def is_prime(number ):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution(nth = 1_00_01 ):
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 652 | 0 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)
class lowercase ( SegformerImageProcessor ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 711 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = '''bert-base-cased'''
PEGASUS_XSUM = '''google/pegasus-xsum'''
ARTICLES = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
SUMMARIES = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
T5_TINY = '''patrickvonplaten/t5-tiny-random'''
BART_TINY = '''sshleifer/bart-tiny-random'''
MBART_TINY = '''sshleifer/tiny-mbart'''
MARIAN_TINY = '''sshleifer/tiny-marian-en-de'''
def _dump_articles(path , articles ):
    content = '\n'.join(articles )
    Path(path ).open('w' ).writelines(content )
def make_test_data_dir(tmp_dir ):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , F'''{split}.source''' ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , F'''{split}.target''' ) , SUMMARIES )
    return tmp_dir
class lowercase ( TestCasePlus ):
"""simple docstring"""
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
_snake_case : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_snake_case : Optional[int] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
_snake_case : Any = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
_snake_case : Dict = 4
_snake_case : Any = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_snake_case , _snake_case : Optional[Any] = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
        _snake_case : int = Seq2SeqDataset(
lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , src_lang=lowerCamelCase_ , tgt_lang=lowerCamelCase_ , )
_snake_case : List[str] = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_snake_case : List[Any] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str ):
'''simple docstring'''
_snake_case : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
_snake_case : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_snake_case : Dict = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
_snake_case : Union[str, Any] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
_snake_case : Union[str, Any] = 4
        _snake_case : Optional[int] = LegacySeq2SeqDataset(
lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=20 , max_target_length=lowerCamelCase_ , )
_snake_case : Dict = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : int = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
_snake_case : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_snake_case : Any = tmp_dir.joinpath('train.source' ).open().readlines()
_snake_case : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(lowerCamelCase_ , lowerCamelCase_ , 1_28 , lowerCamelCase_ )
_snake_case : Tuple = {x.name for x in tmp_dir.iterdir()}
_snake_case : Dict = {x.name for x in save_dir.iterdir()}
_snake_case : str = save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(lowerCamelCase_ ) < len(lowerCamelCase_ )
assert len(lowerCamelCase_ ) == 1
assert len(packed_examples[0] ) == sum(len(lowerCamelCase_ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
_snake_case , _snake_case , _snake_case : int = self._get_dataset(max_len=64 )
_snake_case : List[str] = 64
_snake_case : str = ds.make_dynamic_sampler(lowerCamelCase_ , required_batch_size_multiple=lowerCamelCase_ )
_snake_case : Optional[Any] = [len(lowerCamelCase_ ) for x in batch_sampler]
assert len(set(lowerCamelCase_ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(lowerCamelCase_ ) == len(lowerCamelCase_ ) # no dropped or added examples
_snake_case : Union[str, Any] = DataLoader(lowerCamelCase_ , batch_sampler=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
_snake_case : List[Any] = []
_snake_case : List[Any] = []
for batch in data_loader:
_snake_case : Any = batch['input_ids'].shape
_snake_case : str = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_snake_case : int = np.product(batch['input_ids'].shape )
num_src_per_batch.append(lowerCamelCase_ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(lowerCamelCase_ )
assert num_src_per_batch[0] == max(lowerCamelCase_ )
if failures:
raise AssertionError(f'''too many tokens in {len(lowerCamelCase_ )} batches''' )
    def __UpperCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        ds , _ , tokenizer = self._get_dataset(max_len=5_12 )
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs , shuffle=False )
        naive_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2 )
        sortish_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2 , sampler=sortish_sampler )
        pad = tokenizer.pad_token_id
        def count_pad_tokens(data_loader , k='input_ids' ):
            return [batch[k].eq(pad ).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(sortish_dl , k='labels' ) ) < sum(count_pad_tokens(naive_dl , k='labels' ) )
        assert sum(count_pad_tokens(sortish_dl ) ) < sum(count_pad_tokens(naive_dl ) )
        assert len(sortish_dl ) == len(naive_dl )
    def _get_dataset( self , n_obs=10_00 , max_len=1_28 ):
        '''simple docstring'''
        if os.getenv('USE_REAL_DATA' , False ):
            data_dir = 'examples/seq2seq/wmt_en_ro'
            max_tokens = max_len * 2 * 64
            if not Path(data_dir ).joinpath('train.len' ).exists():
                save_len_file(MARIAN_TINY , data_dir )
        else:
            data_dir = 'examples/seq2seq/test_data/wmt_en_ro'
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY , data_dir )
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY )
        ds = Seq2SeqDataset(
            tokenizer , data_dir=data_dir , type_path='train' , max_source_length=max_len , max_target_length=max_len , n_obs=n_obs , )
        return ds, max_tokens, tokenizer
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case : Any = self._get_dataset()
_snake_case : List[str] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=lowerCamelCase_ ) )
_snake_case : Optional[Any] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=lowerCamelCase_ ) )
assert idsa.intersection(lowerCamelCase_ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase_ , use_fast=lowerCamelCase_ )
if tok_name == MBART_TINY:
            _snake_case : int = Seq2SeqDataset(
lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
_snake_case : Optional[Any] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
            _snake_case : Tuple = Seq2SeqDataset(
lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
_snake_case : List[Any] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(lowerCamelCase_ ) == 1 if tok_name == BART_TINY else len(lowerCamelCase_ ) == 0
| 652 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
            'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        '''simple docstring'''
        return CLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def __UpperCAmelCase ( self : Any ):
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , CLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , CLIPImageProcessor )
    def __UpperCAmelCase ( self : int ):
        '''simple docstring'''
        processor = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , CLIPImageProcessor )
    def __UpperCAmelCase ( self : List[str] ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors='np' )
        input_processor = processor(images=image_input , return_tensors='np' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def __UpperCAmelCase ( self : List[Any] ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def __UpperCAmelCase ( self : List[str] ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def __UpperCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def __UpperCAmelCase ( self : Any ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 712 |
from __future__ import annotations
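# Greedy fractional knapsack: items may be taken partially, so picking by value/weight ratio is optimal.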
def fractional_knapsack(value , weight , capacity ):
    """
    >>> value = [1, 3, 5, 7, 9]
    >>> weight = [0.9, 0.7, 0.5, 0.3, 0.1]
    >>> fractional_knapsack(value, weight, 5)
    (25, [1, 1, 1, 1, 1])
    """
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i: ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 652 | 0 |
from __future__ import annotations
import bisect
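# Pure-Python equivalents of bisect_left/bisect_right plus iterative, stdlib-backed and recursive binary searches.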
def bisect_left(sorted_collection , item , lo = 0 , hi = -1 ):
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(sorted_collection , item , lo = 0 , hi = -1 ):
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left(sorted_collection , item , lo = 0 , hi = -1 ):
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right(sorted_collection , item , lo = 0 , hi = -1 ):
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search(sorted_collection , item ):
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection , item ):
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection , item , left , right ):
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
lowercase_ : List[str] = input('''Enter numbers separated by comma:\n''').strip()
lowercase_ : Any = sorted(int(item) for item in user_input.split(''','''))
lowercase_ : Tuple = int(input('''Enter a single number to be found in the list:\n'''))
lowercase_ : Optional[int] = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
| 713 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_canine'''] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 0 |
import math
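# Project Euler problem 6: difference between the square of the sum and the sum of the squares
# of the first n natural numbers.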
def solution(n = 1_00 ):
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
| 714 |
import math
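# Project Euler problem 7: collect primes by 6k +/- 1 trial division until the nth one is found.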
def is_prime(number ):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth = 1_00_01 ):
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.' ) from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.' )
    primes: list[int] = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 652 | 0 |
import math
import unittest
def is_prime(number ):
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
        with self.assertRaises(AssertionError ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1 ) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 715 |
import torch
from transformers import AutoModel
class FSNERModel( torch.nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase_ : Dict="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
        super(FSNERModel , self ).__init__()
_snake_case : Optional[Any] = AutoModel.from_pretrained(lowerCamelCase_ , return_dict=lowerCamelCase_ )
_snake_case : str = torch.nn.CosineSimilarity(3 , 1e-08 )
_snake_case : str = torch.nn.Softmax(dim=1 )
def __UpperCAmelCase ( self : int , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
return self.bert(**lowerCamelCase_ ).last_hidden_state
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
return token_embeddings.sum(2 , keepdim=lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any]=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(lowerCamelCase_ , lowerCamelCase_ ) )
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = W_supports['sizes'].tolist()
_snake_case : int = W_supports['start_token_id'].item()
_snake_case : List[str] = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_snake_case : Optional[int] = self.BERT(**lowerCamelCase_ )
_snake_case : Optional[Any] = self.BERT(**lowerCamelCase_ )
_snake_case : Optional[int] = None
_snake_case : Optional[int] = None
_snake_case : List[str] = W_supports['input_ids'] == start_token_id
_snake_case : Union[str, Any] = W_supports['input_ids'] == end_token_id
for i, size in enumerate(lowerCamelCase_ ):
if i == 0:
_snake_case : str = 0
else:
_snake_case : Union[str, Any] = support_sizes[i - 1]
_snake_case : Tuple = S[s : s + size][start_token_masks[s : s + size]]
_snake_case : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
_snake_case : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_snake_case : Union[str, Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_snake_case : Optional[Any] = torch.vstack((p_starts, p_start) )
_snake_case : List[str] = torch.vstack((p_ends, p_end) )
else:
_snake_case : Union[str, Any] = p_start
_snake_case : Any = p_end
return p_starts, p_ends
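# Illustrative usage sketch (added; the method names of the class above are
# obfuscated, so this is pseudocode following the FSNER interface rather than
# runnable code -- the tokenizer and the [E]/[/E] entity markers are assumptions):
# model = FSNERModel()
# W_query = tokenizer(['who makes the 4000 series?'], return_tensors='pt', padding=True)
# W_supports = tokenizer(['[E] ACME [/E] makes the 4000 series .'], return_tensors='pt', padding=True)
# W_supports['sizes'] = torch.tensor([1])
# W_supports['start_token_id'] = torch.tensor(tokenizer.convert_tokens_to_ids('[E]'))
# W_supports['end_token_id'] = torch.tensor(tokenizer.convert_tokens_to_ids('[/E]'))
# p_starts, p_ends = model(W_query, W_supports)  # per-token start/end probabilities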
| 652 | 0 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
UpperCamelCase_ : Tuple = logging.getLogger(__name__)
class NER( TokenClassificationTask ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Dict=-1 ):
'''simple docstring'''
_snake_case : str = label_idx
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : Union[str, Any] = mode.value
_snake_case : Optional[int] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Dict = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
_snake_case : List[Any] = []
_snake_case : int = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
_snake_case : Optional[Any] = []
_snake_case : Union[str, Any] = []
else:
_snake_case : Tuple = line.split(' ' )
words.append(splits[0] )
if len(lowerCamelCase_ ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
return examples
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
_snake_case : str = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(lowerCamelCase_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_snake_case : List[str] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(lowerCamelCase_ )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : Optional[int] = f.read().splitlines()
if "O" not in labels:
_snake_case : Optional[int] = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk( NER ):
"""simple docstring"""
def __init__( self : Optional[int] ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : str = f.read().splitlines()
if "O" not in labels:
_snake_case : Union[str, Any] = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS( TokenClassificationTask ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : str = mode.value
_snake_case : List[str] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Tuple = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : List[str] = []
_snake_case : str = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
return examples
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
_snake_case : Dict = 0
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : Optional[int] = preds_list[example_id]
_snake_case : List[Any] = ''
for token in sentence:
out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(lowerCamelCase_ )
example_id += 1
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
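# Illustrative usage sketch (added; the method names are obfuscated above, so
# get_labels / read_examples_from_file follow the un-obfuscated utils_ner API
# and are assumptions, as is the CoNLL-style train.txt layout under data_dir):
# task = NER()
# labels = task.get_labels(None)  # falls back to the default CoNLL-2003 label set
# examples = task.read_examples_from_file('data_dir', Split.train)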
| 716 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
"""simple docstring"""
@register_to_config
def __init__( self : Any , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : float , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : List[str] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Union[str, Any] = False
_snake_case : Tuple = nn.Dropout(p=lowerCamelCase_ )
_snake_case : Union[str, Any] = TaConfig(
vocab_size=lowerCamelCase_ , d_model=lowerCamelCase_ , num_heads=lowerCamelCase_ , d_kv=lowerCamelCase_ , d_ff=lowerCamelCase_ , dropout_rate=lowerCamelCase_ , feed_forward_proj=lowerCamelCase_ , is_decoder=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , )
_snake_case : Union[str, Any] = nn.ModuleList()
for lyr_num in range(lowerCamelCase_ ):
_snake_case : Any = TaBlock(lowerCamelCase_ )
self.encoders.append(lowerCamelCase_ )
_snake_case : Tuple = TaLayerNorm(lowerCamelCase_ )
_snake_case : List[str] = nn.Dropout(p=lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple ):
'''simple docstring'''
_snake_case : Any = self.token_embedder(lowerCamelCase_ )
_snake_case : List[Any] = encoder_input_tokens.shape[1]
_snake_case : Any = torch.arange(lowerCamelCase_ , device=encoder_input_tokens.device )
x += self.position_encoding(lowerCamelCase_ )
_snake_case : Tuple = self.dropout_pre(lowerCamelCase_ )
        # invert the attention mask
_snake_case : Dict = encoder_input_tokens.size()
_snake_case : Optional[int] = self.get_extended_attention_mask(lowerCamelCase_ , lowerCamelCase_ )
for lyr in self.encoders:
_snake_case : str = lyr(lowerCamelCase_ , lowerCamelCase_ )[0]
_snake_case : Any = self.layer_norm(lowerCamelCase_ )
return self.dropout_post(lowerCamelCase_ ), encoder_inputs_mask
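# Illustrative construction sketch (added; the module above mirrors the notes
# encoder used by spectrogram diffusion, so the keyword names and values below
# are assumptions taken from that context -- the constructor parameters are
# obfuscated in the snippet itself):
# encoder = SpectrogramNotesEncoder(
#     max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#     num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
#     feed_forward_proj='gated-gelu', is_decoder=False,
# )
# encodings, mask = encoder(encoder_input_tokens, encoder_inputs_mask)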
| 652 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ : Union[str, Any] = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
lowercase_ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 717 |
def excel_title_to_column( column_title ):
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
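# Added examples (illustrative): titles are read as base-26 numerals with
# A = 1 ... Z = 26, so "AB" is 1 * 26 + 2 and "ZZ" is 26 * 26 + 26.
assert excel_title_to_column('A') == 1
assert excel_title_to_column('AB') == 28
assert excel_title_to_column('ZZ') == 702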
| 652 | 0 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
lowercase_ : Tuple = get_logger(__name__)
lowercase_ : Union[str, Any] = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class FlaxLogitsProcessor :
"""simple docstring"""
@add_start_docstrings(lowerCamelCase_ )
def __call__( self : Any , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : jnp.ndarray ):
'''simple docstring'''
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class FlaxLogitsWarper :
"""simple docstring"""
@add_start_docstrings(lowerCamelCase_ )
def __call__( self : Dict , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : jnp.ndarray ):
'''simple docstring'''
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class FlaxLogitsProcessorList( list ):
"""simple docstring"""
@add_start_docstrings(lowerCamelCase_ )
def __call__( self : Tuple , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : int , **lowerCamelCase_ : Dict ):
'''simple docstring'''
for processor in self:
_snake_case : List[Any] = inspect.signature(processor.__call__ ).parameters
if len(lowerCamelCase_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
f'''{processor.__class__} are passed to the logits processor.''' )
_snake_case : Dict = processor(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
else:
_snake_case : List[Any] = processor(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return scores
class FlaxTemperatureLogitsWarper( FlaxLogitsWarper ):
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : float ):
'''simple docstring'''
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or not (temperature > 0):
raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
_snake_case : Tuple = temperature
def __call__( self : Union[str, Any] , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : int = scores / self.temperature
return scores
class FlaxTopPLogitsWarper( FlaxLogitsWarper ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : float , lowerCamelCase_ : float = -float('Inf' ) , lowerCamelCase_ : int = 1 ):
'''simple docstring'''
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or (min_tokens_to_keep < 1):
raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
_snake_case : str = top_p
_snake_case : List[Any] = filter_value
_snake_case : Optional[int] = min_tokens_to_keep
def __call__( self : Dict , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : Dict = lax.top_k(lowerCamelCase_ , scores.shape[-1] )
_snake_case : List[Any] = jnp.full_like(lowerCamelCase_ , self.filter_value )
_snake_case : str = jax.nn.softmax(lowerCamelCase_ , axis=-1 ).cumsum(axis=-1 )
_snake_case : List[str] = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
_snake_case : Optional[int] = jnp.roll(lowerCamelCase_ , 1 )
score_mask |= score_mask.at[:, 0].set(lowerCamelCase_ )
# min tokens to keep
_snake_case : int = score_mask.at[:, : self.min_tokens_to_keep].set(lowerCamelCase_ )
_snake_case : Optional[int] = jnp.where(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Optional[Any] = jax.lax.sort_key_val(lowerCamelCase_ , lowerCamelCase_ )[-1]
return next_scores
class FlaxTopKLogitsWarper( FlaxLogitsWarper ):
"""simple docstring"""
def __init__( self : Any , lowerCamelCase_ : int , lowerCamelCase_ : float = -float('Inf' ) , lowerCamelCase_ : int = 1 ):
'''simple docstring'''
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or top_k <= 0:
raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
_snake_case : List[Any] = max(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Union[str, Any] = filter_value
def __call__( self : str , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : str = scores.shape
_snake_case : Any = jnp.full(batch_size * vocab_size , self.filter_value )
_snake_case : Dict = min(self.top_k , scores.shape[-1] ) # Safety check
_snake_case : Optional[Any] = lax.top_k(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Optional[Any] = jnp.broadcast_to((jnp.arange(lowerCamelCase_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
_snake_case : List[str] = topk_scores.flatten()
_snake_case : Any = topk_indices.flatten() + shift
_snake_case : Tuple = next_scores_flat.at[topk_indices_flat].set(lowerCamelCase_ )
_snake_case : List[Any] = next_scores_flat.reshape(lowerCamelCase_ , lowerCamelCase_ )
return next_scores
class FlaxForcedBOSTokenLogitsProcessor( FlaxLogitsProcessor ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : List[Any] = bos_token_id
def __call__( self : str , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : List[Any] = jnp.full(scores.shape , -float('inf' ) )
_snake_case : Optional[int] = 1 - jnp.bool_(cur_len - 1 )
_snake_case : Any = jnp.where(lowerCamelCase_ , new_scores.at[:, self.bos_token_id].set(0 ) , lowerCamelCase_ )
return scores
class FlaxForcedEOSTokenLogitsProcessor( FlaxLogitsProcessor ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : Optional[int] = max_length
_snake_case : Union[str, Any] = eos_token_id
def __call__( self : Union[str, Any] , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : Any = jnp.full(scores.shape , -float('inf' ) )
_snake_case : str = 1 - jnp.bool_(cur_len - self.max_length + 1 )
_snake_case : str = jnp.where(lowerCamelCase_ , new_scores.at[:, self.eos_token_id].set(0 ) , lowerCamelCase_ )
return scores
class FlaxMinLengthLogitsProcessor( FlaxLogitsProcessor ):
"""simple docstring"""
def __init__( self : str , lowerCamelCase_ : int , lowerCamelCase_ : int ):
'''simple docstring'''
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or min_length < 0:
raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or eos_token_id < 0:
raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
_snake_case : str = min_length
_snake_case : Optional[Any] = eos_token_id
def __call__( self : Union[str, Any] , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : List[Any] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
_snake_case : Dict = jnp.where(lowerCamelCase_ , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , lowerCamelCase_ )
return scores
class FlaxSuppressTokensAtBeginLogitsProcessor( FlaxLogitsProcessor ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = list(lowerCamelCase_ )
_snake_case : int = begin_index
def __call__( self : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : Any = 1 - jnp.bool_(cur_len - self.begin_index )
_snake_case : Union[str, Any] = jnp.where(lowerCamelCase_ , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , lowerCamelCase_ )
return scores
class FlaxSuppressTokensLogitsProcessor( FlaxLogitsProcessor ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : list ):
'''simple docstring'''
_snake_case : Optional[Any] = list(lowerCamelCase_ )
def __call__( self : int , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : Optional[int] = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class FlaxForceTokensLogitsProcessor( FlaxLogitsProcessor ):
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = dict(lowerCamelCase_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
_snake_case : Dict = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
_snake_case : Any = force_token_array.at[index].set(lowerCamelCase_ )
_snake_case : Union[str, Any] = jnp.intaa(lowerCamelCase_ )
def __call__( self : int , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : int ):
'''simple docstring'''
def _force_token(lowerCamelCase_ : Any ):
_snake_case : int = scores.shape[0]
_snake_case : List[str] = self.force_token_array[generation_idx]
_snake_case : List[Any] = jnp.ones_like(lowerCamelCase_ , dtype=scores.dtype ) * -float('inf' )
_snake_case : int = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
_snake_case : List[Any] = lax.dynamic_update_slice(lowerCamelCase_ , lowerCamelCase_ , (0, current_token) )
return new_scores
_snake_case : Optional[Any] = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(lowerCamelCase_ ) , lambda: scores , ) , )
return scores
class FlaxWhisperTimeStampLogitsProcessor( FlaxLogitsProcessor ):
"""simple docstring"""
def __init__( self : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = generate_config.eos_token_id
_snake_case : Any = generate_config.no_timestamps_token_id
_snake_case : Optional[Any] = generate_config.no_timestamps_token_id + 1
_snake_case : Dict = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(lowerCamelCase_ , 'max_initial_timestamp_index' ):
_snake_case : str = generate_config.max_initial_timestamp_index
else:
_snake_case : Union[str, Any] = model_config.vocab_size
if self.max_initial_timestamp_index is None:
_snake_case : List[Any] = model_config.vocab_size
def __call__( self : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict ):
'''simple docstring'''
_snake_case : List[str] = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] ):
_snake_case : Optional[int] = jnp.where((cur_len - self.begin_index) >= 1 , lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Tuple = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , lowerCamelCase_ , )
_snake_case : Any = jnp.where((cur_len - self.begin_index) < 2 , lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Tuple = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , lowerCamelCase_ , lowerCamelCase_ , )
return jnp.where(
lowerCamelCase_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , lowerCamelCase_ , )
        _snake_case : Optional[Any] = jax.vmap(handle_pairs )(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : List[str] = jnp.where(cur_len == self.begin_index , lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Optional[int] = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , lowerCamelCase_ , )
_snake_case : Any = self.timestamp_begin + self.max_initial_timestamp_index
_snake_case : str = jnp.where(
lowerCamelCase_ , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , lowerCamelCase_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
_snake_case : List[Any] = jax.nn.log_softmax(lowerCamelCase_ , axis=-1 )
def handle_cumulative_probs(lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] ):
_snake_case : List[str] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
_snake_case : Tuple = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , lowerCamelCase_ , )
        _snake_case : Optional[int] = jax.vmap(handle_cumulative_probs )(lowerCamelCase_ , lowerCamelCase_ )
return scores
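# Illustrative usage sketch (added; `input_ids` and `scores` are placeholder
# arrays, not defined in this module):
# processors = FlaxLogitsProcessorList([
#     FlaxTemperatureLogitsWarper(temperature=0.7),
#     FlaxTopKLogitsWarper(top_k=50),
# ])
# scores = processors(input_ids, scores, cur_len=input_ids.shape[-1])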
| 718 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
lowercase_ : List[str] = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
lowercase_ : Optional[int] = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
lowercase_ : Any = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation from sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
_snake_case : str = len(references[0] )
if any(len(lowerCamelCase_ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
_snake_case : int = [[refs[i] for refs in references] for i in range(lowerCamelCase_ )]
_snake_case : Optional[int] = TER(
normalized=lowerCamelCase_ , no_punct=lowerCamelCase_ , asian_support=lowerCamelCase_ , case_sensitive=lowerCamelCase_ , )
_snake_case : Optional[Any] = sb_ter.corpus_score(lowerCamelCase_ , lowerCamelCase_ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 652 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ : Optional[Any] = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : int = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : str = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
lowercase_ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 719 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowercase_ : Optional[int] = object()
# For specifying empty leaf dict `{}`
lowercase_ : List[Any] = object()
def _match( qs , ks ):
    # compile the query regexes once and require a complete match
    qts = tuple(re.compile(x + '$' ) for x in qs )
    # slide a window of the same length as qs over ks
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules( rules ):
    def replace(key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val
    return replace
def _get_partition_rules( ):
return [
# embeddings
(("transformer", "wpe", "embedding"), P('mp' , __lowerCAmelCase )),
(("transformer", "wte", "embedding"), P('mp' , __lowerCAmelCase )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCAmelCase , 'mp' )),
(("attention", "out_proj", "kernel"), P('mp' , __lowerCAmelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__lowerCAmelCase , 'mp' )),
(("mlp", "c_fc", "bias"), P('mp' )),
(("mlp", "c_proj", "kernel"), P('mp' , __lowerCAmelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def set_partitions( in_dict ):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
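# Illustrative usage sketch (added; `model` and `input_ids` are hypothetical --
# any Flax GPT-2 module whose parameter tree matches the rules above):
# params = model.init(jax.random.PRNGKey(0), input_ids)['params']
# spec = set_partitions(params)  # FrozenDict mapping each weight to a PartitionSpec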
| 652 | 0 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward( self , input_ids , token_type_ids , attention_mask ):
        return None


class FuncNonContiguousArgs:
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        return None
class OnnxExportTestCase( unittest.TestCase ):
"""simple docstring"""
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_ , 'tf' , 12 , **lowerCamelCase_ )
@require_torch
@slow
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_ , 'pt' , 12 , **lowerCamelCase_ )
@require_torch
@slow
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
from transformers import BertModel
_snake_case : int = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(lowerCamelCase_ ) )
vocab_file.flush()
_snake_case : List[Any] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
_snake_case : Union[str, Any] = BertModel(BertConfig(vocab_size=len(lowerCamelCase_ ) ) )
model.save_pretrained(lowerCamelCase_ )
self._test_export(lowerCamelCase_ , 'pt' , 12 , lowerCamelCase_ )
@require_tf
@slow
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_snake_case : Union[str, Any] = self._test_export(lowerCamelCase_ , 'tf' , 12 , **lowerCamelCase_ )
_snake_case : List[str] = quantize(Path(lowerCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_snake_case : Any = self._test_export(lowerCamelCase_ , 'pt' , 12 , **lowerCamelCase_ )
_snake_case : int = quantize(lowerCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Dict=None , **lowerCamelCase_ : str ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
_snake_case : Union[str, Any] = Path(lowerCamelCase_ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
return path
except Exception as e:
self.fail(lowerCamelCase_ )
@require_torch
@require_tokenizers
@slow
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
from transformers import BertModel
_snake_case : Dict = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
_snake_case : Tuple = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_ , lowerCamelCase_ , 'pt' )
@require_tf
@require_tokenizers
@slow
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
from transformers import TFBertModel
_snake_case : Dict = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
_snake_case : Optional[Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_ , lowerCamelCase_ , 'tf' )
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
_snake_case : str = FeatureExtractionPipeline(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Optional[Any] = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
_snake_case : Tuple = infer_shapes(lowerCamelCase_ , lowerCamelCase_ )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , lowerCamelCase_ )
self.assertSequenceEqual(variable_names[3:] , lowerCamelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Optional[Any] = ['input_ids', 'attention_mask', 'token_type_ids']
_snake_case : Optional[int] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
_snake_case : int = ensure_valid_input(FuncContiguousArgs() , lowerCamelCase_ , lowerCamelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase_ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase_ ) , set(lowerCamelCase_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase_ , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
_snake_case : int = ensure_valid_input(FuncNonContiguousArgs() , lowerCamelCase_ , lowerCamelCase_ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowerCamelCase_ ) , 1 )
self.assertEqual(len(lowerCamelCase_ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] , 'input_ids' )
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : str = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
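# Illustrative direct call (added; mirrors what _test_export above wires up,
# with a placeholder output path -- convert and quantize are the helpers
# imported at the top of this file):
# convert('pt', 'bert-base-cased', Path('/tmp/bert-base-cased.onnx'), 12, tokenizer=None)
# quantized_path = quantize(Path('/tmp/bert-base-cased.onnx'))  # writes *-quantized.onnx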
| 720 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowercase_ : Any = logging.getLogger(__name__)
def save_model( __lowerCAmelCase , __lowerCAmelCase ):
# save results
if os.path.exists(__lowerCAmelCase ):
if os.path.exists(os.path.join(__lowerCAmelCase , 'config.json' ) ) and os.path.isfile(
os.path.join(__lowerCAmelCase , 'config.json' ) ):
os.remove(os.path.join(__lowerCAmelCase , 'config.json' ) )
if os.path.exists(os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) ):
os.remove(os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) )
else:
os.makedirs(__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
def entropy( __lowerCAmelCase , __lowerCAmelCase=False ):
_snake_case : int = 2
if unlogit:
_snake_case : Dict = torch.pow(__lowerCAmelCase , __lowerCAmelCase )
_snake_case : Optional[int] = p * torch.log(__lowerCAmelCase )
_snake_case : List[str] = 0
return -plogp.sum(dim=-1 )
def print_ad_tensor( __lowerCAmelCase ):
logger.info('lv, h >\t' + '\t'.join(F'''{x + 1}''' for x in range(len(__lowerCAmelCase ) ) ) )
for row in range(len(__lowerCAmelCase ) ):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def compute_heads_importance( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=False ):
_snake_case , _snake_case : List[str] = model.config.num_hidden_layers, model.config.num_attention_heads
_snake_case : Optional[int] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
_snake_case : Union[str, Any] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
if head_mask is None:
_snake_case : List[str] = torch.ones(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=__lowerCAmelCase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
_snake_case : List[str] = None
_snake_case : str = 0.0
_snake_case : List[str] = 0.0
for step, inputs in enumerate(tqdm(__lowerCAmelCase , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
_snake_case : Dict = tuple(t.to(args.device ) for t in inputs )
((_snake_case) , ) : int = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_snake_case : List[str] = model(__lowerCAmelCase , labels=__lowerCAmelCase , head_mask=__lowerCAmelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_snake_case , _snake_case , _snake_case : Dict = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__lowerCAmelCase ):
_snake_case : int = entropy(attn.detach() , __lowerCAmelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__lowerCAmelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_snake_case : List[str] = 2
_snake_case : Any = torch.pow(torch.pow(__lowerCAmelCase , __lowerCAmelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
_snake_case : Union[str, Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(__lowerCAmelCase )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(__lowerCAmelCase )
logger.info('Head ranked by importance scores' )
_snake_case : Tuple = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_snake_case : Optional[Any] = torch.arange(
head_importance.numel() , device=args.device )
_snake_case : Union[str, Any] = head_ranks.view_as(__lowerCAmelCase )
print_ad_tensor(__lowerCAmelCase )
return attn_entropy, head_importance, total_loss
def mask_heads( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case , _snake_case , _snake_case : int = compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase )
    _snake_case : List[str] = 1 / loss  # instead of downstream score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , __lowerCAmelCase , original_score * args.masking_threshold )
_snake_case : Optional[Any] = torch.ones_like(__lowerCAmelCase )
_snake_case : Tuple = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_snake_case : Dict = original_score
while current_score >= original_score * args.masking_threshold:
_snake_case : List[str] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_snake_case : Optional[Any] = float('Inf' )
_snake_case : Optional[int] = head_importance.view(-1 ).sort()[1]
if len(__lowerCAmelCase ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
_snake_case : List[Any] = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
_snake_case : Tuple = new_head_mask.view(-1 )
_snake_case : List[str] = 0.0
_snake_case : int = new_head_mask.view_as(__lowerCAmelCase )
_snake_case : Optional[Any] = new_head_mask.clone().detach()
print_ad_tensor(__lowerCAmelCase )
# Compute metric and head importance again
_snake_case , _snake_case , _snake_case : List[str] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_snake_case : Union[str, Any] = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , __lowerCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('Final head mask' )
print_ad_tensor(__lowerCAmelCase )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def prune_heads( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[Any] = datetime.now()
_snake_case , _snake_case , _snake_case : List[Any] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_snake_case : str = 1 / loss
_snake_case : Optional[int] = datetime.now() - before_time
_snake_case : Optional[int] = sum(p.numel() for p in model.parameters() )
_snake_case : Optional[Any] = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowerCAmelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[int] = [
v,
]
assert sum(len(__lowerCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__lowerCAmelCase )
_snake_case : Optional[Any] = sum(p.numel() for p in model.parameters() )
_snake_case : List[str] = datetime.now()
_snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase , actually_pruned=__lowerCAmelCase , )
_snake_case : Dict = 1 / loss
_snake_case : str = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , __lowerCAmelCase , __lowerCAmelCase , pruned_num_params / original_num_params * 1_00 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , __lowerCAmelCase , __lowerCAmelCase )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_00 )
save_model(__lowerCAmelCase , args.output_dir )
def main( ):
_snake_case : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=__lowerCAmelCase , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=__lowerCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=__lowerCAmelCase , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=__lowerCAmelCase , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=__lowerCAmelCase , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=__lowerCAmelCase , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_28 , type=__lowerCAmelCase , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=__lowerCAmelCase , help='Batch size.' )
parser.add_argument('--seed' , type=__lowerCAmelCase , default=42 )
parser.add_argument('--local_rank' , type=__lowerCAmelCase , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
_snake_case : List[str] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCAmelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_snake_case : Optional[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
_snake_case : Any = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_snake_case : Union[str, Any] = torch.device('cuda' , args.local_rank )
_snake_case : Dict = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_snake_case : Any = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_snake_case : List[str] = nn.parallel.DistributedDataParallel(
__lowerCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCAmelCase )
elif args.n_gpu > 1:
_snake_case : int = nn.DataParallel(__lowerCAmelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__lowerCAmelCase )
torch.save(__lowerCAmelCase , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , __lowerCAmelCase )
# Prepare dataset
_snake_case : str = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_snake_case : Dict = (torch.from_numpy(__lowerCAmelCase ),)
_snake_case : List[Any] = TensorDataset(*__lowerCAmelCase )
_snake_case : List[str] = RandomSampler(__lowerCAmelCase )
_snake_case : List[str] = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_snake_case : Union[str, Any] = mask_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
prune_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
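# A typical invocation (the script name and paths are illustrative only; the
# flags are the ones defined in the argparse setup above):
#
#   python run_prune_gpt.py \
#       --model_name_or_path gpt2 \
#       --data_dir ./token_ids.txt \
#       --output_dir ./pruned_gpt2 \
#       --try_masking --masking_threshold 0.9 --masking_amount 0.1
#
# ``--data_dir`` must point at a whitespace-separated text file of integer
# token ids, since the dataset is built directly with ``np.loadtxt``.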
| 652 | 0 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class lowercase ( a_ , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : List[Any] = WavaVecaPhonemeCTCTokenizer
_UpperCamelCase : Optional[Any] = False
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
super().setUp()
_snake_case : List[str] = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
).split(' ' )
_snake_case : Union[str, Any] = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
_snake_case : Any = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'}
_snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + '\n' )
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Dict=False , lowerCamelCase_ : Union[str, Any]=20 , lowerCamelCase_ : Tuple=5 ):
'''simple docstring'''
_snake_case : Union[str, Any] = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCamelCase_ )) for i in range(len(tokenizer ) )]
_snake_case : Union[str, Any] = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowerCamelCase_ ) , toks ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
_snake_case : Optional[Any] = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
_snake_case : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case : Any = [t[0] for t in toks]
# Ensure consistency
_snake_case : Optional[Any] = tokenizer.decode(lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
_snake_case : List[Any] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
_snake_case : Union[str, Any] = ' ' + output_txt
_snake_case : Any = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def __UpperCAmelCase ( self : Optional[int] , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
# check adding a single token
tokenizer.add_tokens('xxx' )
_snake_case : Optional[int] = tokenizer('m xxx ɪ' , do_phonemize=lowerCamelCase_ ).input_ids
self.assertEqual(lowerCamelCase_ , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(['aaa', 'bbb', 'ccc'] )
_snake_case : Any = tokenizer('m aaa ɪ ccc' , do_phonemize=lowerCamelCase_ ).input_ids
self.assertEqual(lowerCamelCase_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
_snake_case : List[str] = tokenizer('maɪ c' , do_phonemize=lowerCamelCase_ ).input_ids
self.assertEqual(lowerCamelCase_ , [3, 2_00] ) # mai should be <unk> (=3)
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : int = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
_snake_case : Optional[Any] = 'Hello how are you'
_snake_case : int = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='en-us' )
self.assertEqual(lowerCamelCase_ , 'h ə l oʊ h aʊ ɑːɹ j uː' )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
_snake_case : Tuple = 'Hello how are you'
_snake_case : List[Any] = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(lowerCamelCase_ ).input_ids , tokenizer(lowerCamelCase_ , do_phonemize=lowerCamelCase_ ).input_ids )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : str = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
_snake_case : Union[str, Any] = 'Hello how are you'
_snake_case : List[str] = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='en-us' )
_snake_case : int = tokenizer.decode(tokenizer(lowerCamelCase_ ).input_ids )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
_snake_case : Dict = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
_snake_case : int = tokenizer.decode(sample_ids[0] )
_snake_case : Dict = tokenizer.batch_decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , batch_tokens[0] )
self.assertEqual(lowerCamelCase_ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
_snake_case : Any = 'Hello how are you'
_snake_case : int = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='en-us' )
self.assertEqual(lowerCamelCase_ , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
_snake_case : Optional[Any] = 'Hello how are you'
_snake_case : str = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(lowerCamelCase_ ).input_ids , tokenizer(lowerCamelCase_ , do_phonemize=lowerCamelCase_ ).input_ids )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
_snake_case : List[Any] = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
_snake_case : Tuple = tokenizer.decode(sample_ids[0] )
_snake_case : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , batch_tokens[0] )
self.assertEqual(lowerCamelCase_ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
# decode with no word_del_token filter
_snake_case : Optional[int] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowerCamelCase_ )
_snake_case : str = tokenizer.batch_decode(lowerCamelCase_ , filter_word_delimiter_token=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , batch_tokens[0] )
self.assertEqual(lowerCamelCase_ , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
_snake_case : Union[str, Any] = 'Hello how are you'
_snake_case : List[Any] = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='en-us' )
_snake_case : str = tokenizer.decode(tokenizer(lowerCamelCase_ ).input_ids , filter_word_delimiter_token=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
_snake_case : Any = 'Hello how are you'
_snake_case : Optional[int] = tokenizer.phonemize(lowerCamelCase_ , phonemizer_lang='en-us' )
_snake_case : Optional[int] = tokenizer.decode(tokenizer(lowerCamelCase_ ).input_ids , filter_word_delimiter_token=lowerCamelCase_ )
self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , lowerCamelCase_ )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=lowerCamelCase_ )
_snake_case : Optional[int] = 'Hello how are you'
_snake_case : Optional[Any] = tokenizer(lowerCamelCase_ , phonemizer_lang='en-us' ).input_ids
_snake_case : Any = tokenizer(lowerCamelCase_ , phonemizer_lang='fr-fr' ).input_ids
self.assertNotEqual(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Any = tokenizer.decode(lowerCamelCase_ )
_snake_case : Union[str, Any] = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , 'h ə l oʊ h aʊ ɑːɹ j uː' )
self.assertEqual(lowerCamelCase_ , 'ɛ l o h aʊ a ʁ j u' )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
_snake_case : Tuple = 'Hello how Are you'
_snake_case : Any = 'hello how are you'
_snake_case : Dict = tokenizer(lowerCamelCase_ ).input_ids
_snake_case : Optional[Any] = tokenizer(lowerCamelCase_ ).input_ids
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
tokenizer.add_tokens(['!', '?'] )
tokenizer.add_special_tokens({'cls_token': '$$$'} )
# fmt: off
_snake_case : Any = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
_snake_case : Union[str, Any] = tokenizer.batch_decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] )
@staticmethod
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
_snake_case : str = [d[key] for d in offsets]
return retrieved_list
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.get_tokenizer(word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
_snake_case : List[Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
_snake_case : Any = tokenizer.decode(lowerCamelCase_ , output_char_offsets=lowerCamelCase_ , filter_word_delimiter_token=lowerCamelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('text' in outputs )
self.assertTrue('char_offsets' in outputs )
self.assertTrue(isinstance(outputs , WavaVecaPhonemeCTCTokenizerOutput ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = self.get_tokenizer(word_delimiter_token='|' )
def check_list_tuples_equal(lowerCamelCase_ : Any , lowerCamelCase_ : Any ):
self.assertTrue(isinstance(outputs_batch , WavaVecaPhonemeCTCTokenizerOutput ) )
self.assertTrue(isinstance(outputs_list[0] , WavaVecaPhonemeCTCTokenizerOutput ) )
# transform list to ModelOutput
_snake_case : Dict = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text'] )
def recursive_check(lowerCamelCase_ : Dict , lowerCamelCase_ : int ):
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
[recursive_check(la , lb ) for la, lb in zip(lowerCamelCase_ , lowerCamelCase_ )]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets'] )
# fmt: off
_snake_case : Any = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# that the output type is correct and the output is identical to `decode`
# char
_snake_case : Tuple = tokenizer.batch_decode(lowerCamelCase_ , output_char_offsets=lowerCamelCase_ )
_snake_case : Union[str, Any] = [tokenizer.decode(lowerCamelCase_ , output_char_offsets=lowerCamelCase_ ) for ids in sample_ids]
check_list_tuples_equal(lowerCamelCase_ , lowerCamelCase_ )
@unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
pass
@unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency' )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' )
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
_snake_case : List[Any] = self.get_tokenizers(do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_snake_case : int = tokenizer.vocab_size
_snake_case : Union[str, Any] = len(lowerCamelCase_ )
self.assertNotEqual(lowerCamelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_snake_case : Optional[int] = ['aaaaa bbbbbb', 'cccccccccdddddddd']
_snake_case : List[str] = tokenizer.add_tokens(lowerCamelCase_ )
_snake_case : Any = tokenizer.vocab_size
_snake_case : Dict = len(lowerCamelCase_ )
self.assertNotEqual(lowerCamelCase_ , 0 )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , len(lowerCamelCase_ ) )
self.assertEqual(lowerCamelCase_ , all_size + len(lowerCamelCase_ ) )
_snake_case : List[str] = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=lowerCamelCase_ )
self.assertGreaterEqual(len(lowerCamelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_snake_case : List[str] = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
_snake_case : int = tokenizer.add_special_tokens(lowerCamelCase_ )
_snake_case : List[str] = tokenizer.vocab_size
_snake_case : str = len(lowerCamelCase_ )
self.assertNotEqual(lowerCamelCase_ , 0 )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , len(lowerCamelCase_ ) )
self.assertEqual(lowerCamelCase_ , all_size_a + len(lowerCamelCase_ ) )
_snake_case : Dict = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=lowerCamelCase_ )
self.assertGreaterEqual(len(lowerCamelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : List[Any] = self.get_tokenizers(fast=lowerCamelCase_ , do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_snake_case : int = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
_snake_case : Union[str, Any] = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(output['text'] , str )
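# The char-offset assertions above follow CTC-style decoding: consecutive
# repeats of the same id are merged into one run, pad ids are dropped, and each
# surviving run keeps the start/end positions it occupied in the raw id
# sequence. A minimal standalone sketch of that grouping (a hypothetical
# helper, not part of the tokenizer API):
def _ctc_collapse_with_offsets(ids, pad_id):
    runs = []
    previous = None
    for position, idx in enumerate(ids):
        if idx == previous:
            runs[-1]['end_offset'] = position + 1  # extend the current run
        else:
            runs.append({'char': idx, 'start_offset': position, 'end_offset': position + 1})
        previous = idx
    # dropping pad runs afterwards preserves the original offsets of the rest
    return [run for run in runs if run['char'] != pad_id]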
| 721 |
def A__( __lowerCAmelCase ):
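'''
Return the largest number that can be formed by deleting exactly one digit
from the absolute value of the given integer.

>>> A__(123)
23
>>> A__(-1432)
432
'''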
if not isinstance(__lowerCAmelCase , int ):
raise TypeError('only integers accepted as input' )
else:
num_string = str(abs(__lowerCAmelCase ) )
num_transpositions = [list(num_string ) for char in range(len(num_string ) )]
for index in range(len(num_string ) ):
num_transpositions[index].pop(index )
return max(
int(''.join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 652 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowercase_ : Optional[int] = None
lowercase_ : Tuple = logging.get_logger(__name__)
lowercase_ : Optional[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase_ : Dict = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
lowercase_ : int = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
lowercase_ : Dict = '''▁'''
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Union[str, Any] = AlbertTokenizer
def __init__( self : Any , lowerCamelCase_ : str=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Any=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : Dict=False , lowerCamelCase_ : str="[CLS]" , lowerCamelCase_ : Optional[int]="[SEP]" , lowerCamelCase_ : Dict="<unk>" , lowerCamelCase_ : Union[str, Any]="[SEP]" , lowerCamelCase_ : Dict="<pad>" , lowerCamelCase_ : Optional[Any]="[CLS]" , lowerCamelCase_ : int="[MASK]" , **lowerCamelCase_ : int , ):
'''simple docstring'''
_snake_case : List[Any] = (
AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ , normalized=lowerCamelCase_ )
if isinstance(mask_token , str )
else mask_token
)
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , **lowerCamelCase_ , )
_snake_case : List[str] = do_lower_case
_snake_case : Union[str, Any] = remove_space
_snake_case : List[Any] = keep_accents
_snake_case : Dict = vocab_file
_snake_case : Dict = bool(self.vocab_file )
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = [self.sep_token_id]
_snake_case : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Dict = [self.sep_token_id]
_snake_case : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
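# For a sequence pair, the two methods above produce the standard ALBERT layout:
#
#   tokens:         [CLS] A ... [SEP] B ... [SEP]
#   token_type_ids:   0   0 ...   0   1 ...   1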
def __UpperCAmelCase ( self : str , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case : Tuple = os.path.join(
lowerCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ):
copyfile(self.vocab_file , lowerCamelCase_ )
return (out_vocab_file,)
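# Minimal usage sketch (this class corresponds to the tokenizer published
# upstream as ``AlbertTokenizerFast``; the checkpoint id is one of those listed
# in the pretrained maps above):
#
#   tokenizer = AlbertTokenizerFast.from_pretrained('albert-base-v2')
#   ids = tokenizer('Hello world').input_ids        # [CLS] ... [SEP]
#   tokenizer.save_pretrained('./albert-tok')       # also copies spiece.model when available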
| 700 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowercase_ : Tuple = logging.getLogger(__name__)
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Dict=-1 ):
'''simple docstring'''
_snake_case : str = label_idx
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
if isinstance(mode , Split ):
_snake_case : Union[str, Any] = mode.value
_snake_case : Optional[int] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Dict = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
_snake_case : List[Any] = []
_snake_case : int = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
_snake_case : Optional[Any] = []
_snake_case : Union[str, Any] = []
else:
_snake_case : Tuple = line.split(' ' )
words.append(splits[0] )
if len(lowerCamelCase_ ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
return examples
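# This reader consumes CoNLL-2003-style files: one token per line with
# space-separated columns (token first, label in the column selected by
# ``label_idx``), blank lines between sentences. For example, with the default
# label_idx=-1:
#
#   EU NNP B-NP B-ORG
#   rejects VBZ B-VP O
#
# -> InputExample(words=['EU', 'rejects'], labels=['B-ORG', 'O'])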
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
_snake_case : str = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(lowerCamelCase_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_snake_case : List[str] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(lowerCamelCase_ )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : Optional[int] = f.read().splitlines()
if "O" not in labels:
_snake_case : Optional[int] = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Optional[int] ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : str = f.read().splitlines()
if "O" not in labels:
_snake_case : Union[str, Any] = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class lowercase ( a_ ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
if isinstance(mode , Split ):
_snake_case : str = mode.value
_snake_case : List[str] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Tuple = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : List[str] = []
_snake_case : str = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
return examples
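# ``parse_incr`` yields one token list per sentence from CoNLL-U input, where
# the second column is ``form`` and the fourth is ``upos``, e.g.:
#
#   1   The   the   DET    DT   _   2   det    _   _
#   2   cat   cat   NOUN   NN   _   0   root   _   _
#
# -> words=['The', 'cat'], labels=['DET', 'NOUN']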
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
_snake_case : Dict = 0
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : Optional[int] = preds_list[example_id]
_snake_case : List[Any] = ''
for token in sentence:
out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(lowerCamelCase_ )
example_id += 1
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 652 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( a_ , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : Optional[Any] = KandinskyVaaControlnetPipeline
_UpperCamelCase : List[Any] = ["image_embeds", "negative_image_embeds", "hint"]
_UpperCamelCase : int = ["image_embeds", "negative_image_embeds", "hint"]
_UpperCamelCase : int = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCamelCase : Any = False
@property
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
return 32
@property
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
return self.time_input_dim
@property
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
return 1_00
@property
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Optional[int] = {
'in_channels': 8,
# Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_snake_case : str = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Any = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : Dict = self.dummy_unet
_snake_case : int = self.dummy_movq
_snake_case : Any = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , steps_offset=1 , prediction_type='epsilon' , thresholding=lowerCamelCase_ , )
_snake_case : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Dict=0 ):
'''simple docstring'''
_snake_case : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
_snake_case : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create hint
_snake_case : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
_snake_case : int = torch.manual_seed(lowerCamelCase_ )
else:
_snake_case : int = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
_snake_case : str = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
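# ``image_embeds``/``negative_image_embeds`` form the classifier-free guidance
# pair consumed by the prior-conditioned UNet, and ``hint`` is the ControlNet
# conditioning image (here a random 64x64 RGB tensor).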
return inputs
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = 'cpu'
_snake_case : Optional[int] = self.get_dummy_components()
_snake_case : Union[str, Any] = self.pipeline_class(**lowerCamelCase_ )
_snake_case : Optional[Any] = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Tuple = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
_snake_case : List[Any] = output.images
_snake_case : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ) , return_dict=lowerCamelCase_ , )[0]
_snake_case : Optional[Any] = image[0, -3:, -3:, -1]
_snake_case : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : Tuple = np.array(
[0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' )
_snake_case : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
_snake_case : List[Any] = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
_snake_case : Optional[int] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
_snake_case : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
_snake_case : str = KandinskyVaaControlnetPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
_snake_case : Any = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : str = 'A robot, 4k photo'
_snake_case : Optional[Any] = torch.Generator(device='cuda' ).manual_seed(0 )
_snake_case : List[str] = pipe_prior(
lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_snake_case : Optional[Any] = torch.Generator(device='cuda' ).manual_seed(0 )
_snake_case : Optional[Any] = pipeline(
image_embeds=lowerCamelCase_ , negative_image_embeds=lowerCamelCase_ , hint=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=1_00 , output_type='np' , )
_snake_case : List[Any] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
| 701 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = 1
_snake_case : str = 3
_snake_case : List[str] = (32, 32)
_snake_case : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(lowerCamelCase_ )
@property
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
def extract(*lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : str ):
class lowercase :
"""simple docstring"""
def __init__( self : Tuple ):
'''simple docstring'''
_snake_case : List[str] = torch.ones([0] )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : Tuple ):
'''simple docstring'''
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
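# ``dummy_extractor`` stands in for the safety checker's feature extractor: any
# call returns an ``Out`` object whose ``pixel_values`` is an empty tensor that
# simply follows ``.to(device)`` requests.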
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : int = self.dummy_cond_unet
_snake_case : str = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
_snake_case : Union[str, Any] = self.dummy_vae
_snake_case : Optional[Any] = self.dummy_text_encoder
_snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
_snake_case : Union[str, Any] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[str] = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_snake_case : Union[str, Any] = output.images
_snake_case : List[str] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_snake_case : Tuple = image[0, -3:, -3:, -1]
_snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : List[str] = self.dummy_cond_unet
_snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_snake_case : int = self.dummy_vae
_snake_case : List[Any] = self.dummy_text_encoder
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
_snake_case : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : Union[str, Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : str = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Tuple = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_snake_case : Optional[Any] = output.images
_snake_case : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase_ )
assert isinstance(pipe , StableDiffusionPipeline )
assert isinstance(pipe.scheduler , LMSDiscreteScheduler )
assert pipe.safety_checker is None
_snake_case : Dict = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
_snake_case : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_snake_case : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.dummy_cond_unet
_snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_snake_case : Any = self.dummy_vae
_snake_case : Optional[Any] = self.dummy_text_encoder
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
_snake_case : str = unet.half()
_snake_case : Union[str, Any] = vae.half()
_snake_case : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_snake_case : List[str] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Tuple = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Optional[int] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
_snake_case : List[str] = 40_03_66_03_46
_snake_case : int = 7
# without safety guidance (sld_guidance_scale = 0)
_snake_case : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Union[str, Any] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : str = output.images
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# with safety guidance (strong configuration)
_snake_case : Tuple = torch.manual_seed(lowerCamelCase_ )
_snake_case : int = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : Tuple = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : str = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Union[str, Any] = 'padme amidala taking a bath artwork, safe for work, no nudity'
_snake_case : Optional[Any] = 27_34_97_17_55
_snake_case : Union[str, Any] = 7
_snake_case : Dict = torch.manual_seed(lowerCamelCase_ )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Any = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
_snake_case : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
_snake_case : Optional[int] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[Any] = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
_snake_case : Union[str, Any] = 10_44_35_52_34
_snake_case : Dict = 12
_snake_case : Optional[int] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Optional[int] = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
_snake_case : List[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Optional[int] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 652 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowercase_ : Any = logging.get_logger(__name__)
lowercase_ : Tuple = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : Dict = "perceiver"
def __init__( self : Union[str, Any] , lowerCamelCase_ : List[Any]=2_56 , lowerCamelCase_ : Optional[int]=12_80 , lowerCamelCase_ : int=7_68 , lowerCamelCase_ : str=1 , lowerCamelCase_ : List[Any]=26 , lowerCamelCase_ : Optional[int]=8 , lowerCamelCase_ : Tuple=8 , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Any=None , lowerCamelCase_ : str="kv" , lowerCamelCase_ : Union[str, Any]=1 , lowerCamelCase_ : Any=1 , lowerCamelCase_ : str="gelu" , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Optional[int]=0.02 , lowerCamelCase_ : str=1e-12 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Optional[int]=2_62 , lowerCamelCase_ : Any=20_48 , lowerCamelCase_ : str=56 , lowerCamelCase_ : Any=[3_68, 4_96] , lowerCamelCase_ : Dict=16 , lowerCamelCase_ : str=19_20 , lowerCamelCase_ : Dict=16 , lowerCamelCase_ : Tuple=[1, 16, 2_24, 2_24] , **lowerCamelCase_ : Any , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
_snake_case : Tuple = num_latents
_snake_case : Tuple = d_latents
_snake_case : int = d_model
_snake_case : Tuple = num_blocks
_snake_case : List[str] = num_self_attends_per_block
_snake_case : List[Any] = num_self_attention_heads
_snake_case : str = num_cross_attention_heads
_snake_case : Any = qk_channels
_snake_case : Dict = v_channels
_snake_case : List[Any] = cross_attention_shape_for_attention
_snake_case : List[Any] = self_attention_widening_factor
_snake_case : Union[str, Any] = cross_attention_widening_factor
_snake_case : Union[str, Any] = hidden_act
_snake_case : str = attention_probs_dropout_prob
_snake_case : Dict = initializer_range
_snake_case : Optional[int] = layer_norm_eps
_snake_case : str = use_query_residual
# masked language modeling attributes
_snake_case : Union[str, Any] = vocab_size
_snake_case : int = max_position_embeddings
# image classification attributes
_snake_case : Optional[Any] = image_size
# flow attributes
_snake_case : Any = train_size
# multimodal autoencoding attributes
_snake_case : Optional[int] = num_frames
_snake_case : Union[str, Any] = audio_samples_per_frame
_snake_case : List[Any] = samples_per_patch
_snake_case : Optional[Any] = output_shape
class lowercase ( a_ ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
return 1e-4
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCamelCase_ : int = -1 , lowerCamelCase_ : int = -1 , lowerCamelCase_ : int = -1 , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[TensorType] = None , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 40 , lowerCamelCase_ : int = 40 , ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_snake_case : str = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_snake_case : List[Any] = preprocessor.num_special_tokens_to_add(lowerCamelCase_ )
_snake_case : Union[str, Any] = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
_snake_case : int = [' '.join(['a'] ) * seq_length] * batch_size
_snake_case : List[str] = dict(preprocessor(lowerCamelCase_ , return_tensors=lowerCamelCase_ ) )
_snake_case : str = inputs.pop('input_ids' )
return inputs
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_snake_case : str = compute_effective_axis_dimension(lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch )
_snake_case : List[str] = self._generate_dummy_images(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Dict = dict(preprocessor(images=lowerCamelCase_ , return_tensors=lowerCamelCase_ ) )
_snake_case : int = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
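# Hedged usage sketch: the obfuscated signatures above are not directly
# runnable, so this relies on the upstream `transformers` package instead.
# The hyperparameter values are illustrative only.
from transformers import PerceiverConfig

demo_config = PerceiverConfig(num_latents=1_28, d_latents=6_40)
print(demo_config.num_latents, demo_config.d_latents)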
| 702 |
import functools
def A__( __lowerCAmelCase , __lowerCAmelCase ):
# Validation
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(__lowerCAmelCase ) != 3 or not all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(__lowerCAmelCase ) == 0:
return 0
if min(__lowerCAmelCase ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(__lowerCAmelCase ) >= 3_66:
raise ValueError('All days elements should be less than 366' )
_snake_case : Optional[int] = set(__lowerCAmelCase )
@functools.cache
def dynamic_programming(__lowerCAmelCase ) -> int:
if index > 3_65:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
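    # Hedged demo (values from LeetCode 983; assumes the two positional
    # parameters are (days, costs) once the duplicated names are restored):
    # print(A__([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # expected minimum cost: 11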
| 652 | 0 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class lowercase :
"""simple docstring"""
_UpperCamelCase : float
_UpperCamelCase : TreeNode | None = None
_UpperCamelCase : TreeNode | None = None
def A__( __lowerCAmelCase ):
# Validation
def is_valid_tree(__lowerCAmelCase ) -> bool:
if node is None:
return True
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(__lowerCAmelCase ):
raise ValueError(
            'Each node should be of type TreeNode and its data should be a float.' )
def is_binary_search_tree_recursive_check(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , __lowerCAmelCase , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , __lowerCAmelCase )
)
return is_binary_search_tree_recursive_check(__lowerCAmelCase , -float('inf' ) , float('inf' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
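    # Hedged demo sketch (the duplicated field/parameter names above keep this
    # module from running as written; assuming fields (data, left, right) and
    # helper parameters (node, left_bound, right_bound) are restored):
    # root = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    # print(A__(root))  # -> True for this valid BST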
| 703 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase_ : str = logging.get_logger(__name__)
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : int , *lowerCamelCase_ : str , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
warnings.warn(
'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.' , FutureWarning , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
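# Hedged migration sketch: instantiate the replacement processor imported at
# the top of this module instead of the deprecated wrapper above (default
# arguments are illustrative).
replacement_processor = SegformerImageProcessor()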
| 652 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
lowercase_ : Any = logging.get_logger(__name__)
lowercase_ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase_ : str = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
lowercase_ : List[str] = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
lowercase_ : List[str] = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : Dict = VOCAB_FILES_NAMES
_UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[str] = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
_UpperCamelCase : Any = DistilBertTokenizer
def __init__( self : int , lowerCamelCase_ : Tuple=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : str=True , lowerCamelCase_ : Optional[Any]="[UNK]" , lowerCamelCase_ : List[str]="[SEP]" , lowerCamelCase_ : int="[PAD]" , lowerCamelCase_ : Union[str, Any]="[CLS]" , lowerCamelCase_ : Any="[MASK]" , lowerCamelCase_ : Tuple=True , lowerCamelCase_ : Dict=None , **lowerCamelCase_ : Optional[Any] , ):
'''simple docstring'''
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , tokenize_chinese_chars=lowerCamelCase_ , strip_accents=lowerCamelCase_ , **lowerCamelCase_ , )
_snake_case : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowerCamelCase_ ) != do_lower_case
or normalizer_state.get('strip_accents' , lowerCamelCase_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowerCamelCase_ ) != tokenize_chinese_chars
):
_snake_case : Tuple = getattr(lowerCamelCase_ , normalizer_state.pop('type' ) )
_snake_case : Optional[Any] = do_lower_case
_snake_case : str = strip_accents
_snake_case : List[str] = tokenize_chinese_chars
_snake_case : List[Any] = normalizer_class(**lowerCamelCase_ )
_snake_case : List[Any] = do_lower_case
def __UpperCAmelCase ( self : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=None ):
'''simple docstring'''
_snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Union[str, Any] = [self.sep_token_id]
_snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
'''simple docstring'''
_snake_case : Dict = self._tokenizer.model.save(lowerCamelCase_ , name=lowerCamelCase_ )
return tuple(lowerCamelCase_ )
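# Hedged usage sketch (downloads from the Hugging Face Hub on first run;
# relies on the upstream `transformers` package rather than the obfuscated
# class above):
from transformers import DistilBertTokenizerFast

demo_tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
print(demo_tokenizer('Hello world')['input_ids'])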
| 704 |
from math import factorial
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if successes > trials:
        raise ValueError('successes must be less than or equal to trials' )
if trials < 0 or successes < 0:
raise ValueError('the function is defined for non-negative integers' )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('the function is defined for non-negative integers' )
if not 0 < prob < 1:
        raise ValueError('prob has to be in the range of 0 - 1' )
_snake_case : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
_snake_case : List[Any] = float(factorial(__lowerCAmelCase ) )
coefficient /= factorial(__lowerCAmelCase ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('''Probability of 2 successes out of 4 trials''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
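    # Hedged sanity check (standalone reimplementation with descriptive names,
    # since the duplicated parameter names above block running this module):
    from math import comb, isclose
    assert isclose(sum(comb(4, k) * 0.75**k * 0.25 ** (4 - k) for k in range(5)), 1.0)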
| 652 | 0 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
lowercase_ : List[str] = logging.getLogger(__name__)
class lowercase ( a_ ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : Tuple=None , lowerCamelCase_ : Optional[int]=None ):
'''simple docstring'''
_snake_case : List[Any] = self.layer[current_layer](lowerCamelCase_ , lowerCamelCase_ , head_mask[current_layer] )
_snake_case : Union[str, Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , a_ , )
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : str ):
'''simple docstring'''
super().__init__(lowerCamelCase_ )
_snake_case : List[str] = BertEncoderWithPabee(lowerCamelCase_ )
self.init_weights()
_snake_case : Tuple = 0
_snake_case : List[str] = 0
_snake_case : Dict = 0
_snake_case : Optional[int] = 0
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
_snake_case : Optional[Any] = threshold
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : Dict = patience
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : List[Any] = 0
_snake_case : str = 0
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : Tuple = self.inference_layers_num / self.inference_instances_num
_snake_case : str = (
f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(lowerCamelCase_ )
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Tuple=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Tuple=None , lowerCamelCase_ : str=None , lowerCamelCase_ : Tuple=False , ):
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_snake_case : List[str] = input_ids.size()
elif inputs_embeds is not None:
_snake_case : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_snake_case : List[str] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_snake_case : List[Any] = torch.ones(lowerCamelCase_ , device=lowerCamelCase_ )
if token_type_ids is None:
_snake_case : Optional[int] = torch.zeros(lowerCamelCase_ , dtype=torch.long , device=lowerCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_snake_case : torch.Tensor = self.get_extended_attention_mask(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_snake_case : Tuple = encoder_hidden_states.size()
_snake_case : List[str] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_snake_case : Dict = torch.ones(lowerCamelCase_ , device=lowerCamelCase_ )
_snake_case : int = self.invert_attention_mask(lowerCamelCase_ )
else:
_snake_case : str = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_snake_case : Any = self.get_head_mask(lowerCamelCase_ , self.config.num_hidden_layers )
_snake_case : Dict = self.embeddings(
input_ids=lowerCamelCase_ , position_ids=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , inputs_embeds=lowerCamelCase_ )
_snake_case : List[Any] = embedding_output
if self.training:
_snake_case : int = []
for i in range(self.config.num_hidden_layers ):
_snake_case : Tuple = self.encoder.adaptive_forward(
lowerCamelCase_ , current_layer=lowerCamelCase_ , attention_mask=lowerCamelCase_ , head_mask=lowerCamelCase_ )
_snake_case : int = self.pooler(lowerCamelCase_ )
_snake_case : List[str] = output_layers[i](output_dropout(lowerCamelCase_ ) )
res.append(lowerCamelCase_ )
elif self.patience == 0: # Use all layers for inference
_snake_case : Union[str, Any] = self.encoder(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , head_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , )
_snake_case : int = self.pooler(encoder_outputs[0] )
_snake_case : str = [output_layers[self.config.num_hidden_layers - 1](lowerCamelCase_ )]
else:
_snake_case : Dict = 0
_snake_case : Optional[int] = None
_snake_case : Union[str, Any] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_snake_case : List[str] = self.encoder.adaptive_forward(
lowerCamelCase_ , current_layer=lowerCamelCase_ , attention_mask=lowerCamelCase_ , head_mask=lowerCamelCase_ )
_snake_case : List[str] = self.pooler(lowerCamelCase_ )
_snake_case : Dict = output_layers[i](lowerCamelCase_ )
if regression:
_snake_case : Union[str, Any] = logits.detach()
if patient_result is not None:
_snake_case : Union[str, Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_snake_case : int = 0
else:
_snake_case : Optional[Any] = logits.detach().argmax(dim=1 )
if patient_result is not None:
_snake_case : Optional[int] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(lowerCamelCase_ ) ):
patient_counter += 1
else:
_snake_case : Optional[int] = 0
_snake_case : Dict = logits
if patient_counter == self.patience:
break
_snake_case : Any = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , a_ , )
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
super().__init__(lowerCamelCase_ )
_snake_case : List[str] = config.num_labels
_snake_case : List[Any] = BertModelWithPabee(lowerCamelCase_ )
_snake_case : List[str] = nn.Dropout(config.hidden_dropout_prob )
_snake_case : Optional[int] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : int=None , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Any=None , lowerCamelCase_ : Tuple=None , lowerCamelCase_ : Tuple=None , lowerCamelCase_ : List[str]=None , ):
'''simple docstring'''
_snake_case : Tuple = self.bert(
input_ids=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , position_ids=lowerCamelCase_ , head_mask=lowerCamelCase_ , inputs_embeds=lowerCamelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
_snake_case : int = (logits[-1],)
if labels is not None:
_snake_case : Dict = None
_snake_case : Optional[Any] = 0
for ix, logits_item in enumerate(lowerCamelCase_ ):
if self.num_labels == 1:
# We are doing regression
_snake_case : int = MSELoss()
_snake_case : Optional[int] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
_snake_case : str = CrossEntropyLoss()
_snake_case : Any = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
_snake_case : List[str] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_snake_case : Optional[int] = (total_loss / total_weights,) + outputs
return outputs
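# Minimal standalone sketch of the patience rule used above (illustrative
# only, not the exact PABEE implementation): inference stops once `patience`
# consecutive layers agree on the predicted label.
def _early_exit_layer(layer_predictions, patience):
    streak, previous = 0, None
    for layer, prediction in enumerate(layer_predictions, start=1):
        streak = streak + 1 if prediction == previous else 0
        previous = prediction
        if streak == patience:
            return layer
    return len(layer_predictions)


print(_early_exit_layer([2, 1, 1, 1, 0], patience=2))  # exits at layer 4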
| 705 |
lowercase_ : Tuple = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ : str = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 652 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowercase_ : List[str] = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : int = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Dict = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowercase_ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
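# Standalone sketch of the lazy-import idea behind `_LazyModule` (hypothetical
# helper, no transformers dependency): attributes resolve to real imports only
# on first access, keeping the initial module import cheap.
import importlib


class _LazyNamespace:
    def __init__(self, mapping):
        self._mapping = mapping  # attribute name -> module path

    def __getattr__(self, name):
        module = importlib.import_module(self._mapping[name])
        return getattr(module, name)


demo_ns = _LazyNamespace({'OrderedDict': 'collections'})
print(demo_ns.OrderedDict)  # 'collections' is imported on this first access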
| 706 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ : Optional[Any] = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Tuple = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowercase_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 0 |
from itertools import count
def A__( __lowerCAmelCase = 50 ):
_snake_case : str = [1] * min_block_length
for n in count(__lowerCAmelCase ):
fill_count_functions.append(1 )
for block_length in range(__lowerCAmelCase , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_00_00_00:
break
return n
if __name__ == "__main__":
print(F'''{solution() = }''')
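# Hedged restatement of the counting recurrence above with descriptive names
# (illustrative sketch; it mirrors the obfuscated loops rather than
# re-deriving Project Euler 115 from scratch):
def _fill_count(min_block_length, length):
    ways = [1] * min_block_length
    for n in range(min_block_length, length + 1):
        ways.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                ways[n] += ways[n - block_start - block_length - 1]
            ways[n] += 1
    return ways[length]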
| 707 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase_ : Optional[int] = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 0 |
import random
def A__( __lowerCAmelCase ):
_snake_case : List[str] = num - 1
_snake_case : Optional[int] = 0
while s % 2 == 0:
_snake_case : List[str] = s // 2
t += 1
for _ in range(5 ):
_snake_case : Dict = random.randrange(2 , num - 1 )
_snake_case : int = pow(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if v != 1:
_snake_case : Tuple = 0
while v != (num - 1):
if i == t - 1:
return False
else:
_snake_case : List[Any] = i + 1
_snake_case : Union[str, Any] = (v**2) % num
return True
def A__( __lowerCAmelCase ):
if num < 2:
return False
_snake_case : Tuple = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(__lowerCAmelCase )
def A__( __lowerCAmelCase = 10_24 ):
while True:
_snake_case : Optional[int] = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(__lowerCAmelCase ):
return num
if __name__ == "__main__":
lowercase_ : List[str] = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
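    # Hedged standalone check (uses only the standard library; the obfuscated
    # definitions above are not directly runnable): Fermat-style probable-prime
    # test on a known Mersenne prime.
    print(pow(2, 2**13 - 2, 2**13 - 1) == 1)  # 8191 is prime -> True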
| 708 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowercase_ : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def A__( __lowerCAmelCase , __lowerCAmelCase ):
inspect_dataset(__lowerCAmelCase , __lowerCAmelCase )
_snake_case : str = path + '.py'
assert script_name in os.listdir(__lowerCAmelCase )
assert "__pycache__" not in os.listdir(__lowerCAmelCase )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def A__( __lowerCAmelCase , __lowerCAmelCase ):
inspect_metric(__lowerCAmelCase , __lowerCAmelCase )
_snake_case : Tuple = path + '.py'
assert script_name in os.listdir(__lowerCAmelCase )
assert "__pycache__" not in os.listdir(__lowerCAmelCase )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Union[str, Any] = get_dataset_config_info(__lowerCAmelCase , config_name=__lowerCAmelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
with pytest.raises(__lowerCAmelCase ):
get_dataset_config_info(__lowerCAmelCase , config_name=__lowerCAmelCase )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[Any] = get_dataset_config_names(__lowerCAmelCase )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[Any] = get_dataset_infos(__lowerCAmelCase )
assert list(infos.keys() ) == expected_configs
_snake_case : Any = expected_configs[0]
assert expected_config in infos
_snake_case : str = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Any = get_dataset_infos(__lowerCAmelCase )
assert expected_config in infos
_snake_case : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
with pytest.raises(__lowerCAmelCase ):
get_dataset_split_names(__lowerCAmelCase , config_name=__lowerCAmelCase )
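# Hedged usage sketch of the APIs under test (requires network access to the
# Hugging Face Hub; dataset names are illustrative):
# from datasets import get_dataset_config_names, get_dataset_split_names
# print(get_dataset_config_names('squad'))
# print(get_dataset_split_names('squad', config_name='plain_text'))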
| 652 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ : Any = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[Any] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 709 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
# Initialise PyTorch model
_snake_case : Optional[int] = BertConfig.from_json_file(__lowerCAmelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
_snake_case : List[str] = BertForPreTraining(__lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_bert(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __lowerCAmelCase )
if __name__ == "__main__":
lowercase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase_ : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
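    # Hedged example invocation (script name and paths are hypothetical):
    # python convert_bert_original_tf_checkpoint_to_pytorch.py \
    #   --tf_checkpoint_path ./bert_model.ckpt \
    #   --bert_config_file ./bert_config.json \
    #   --pytorch_dump_path ./pytorch_model.bin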
| 652 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ : Union[str, Any] = {
'''configuration_m2m_100''': ['''M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''M2M100Config''', '''M2M100OnnxConfig'''],
'''tokenization_m2m_100''': ['''M2M100Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : int = [
'''M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''M2M100ForConditionalGeneration''',
'''M2M100Model''',
'''M2M100PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
lowercase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 710 |
import itertools
import math
def A__( __lowerCAmelCase ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A__( ):
_snake_case : Optional[Any] = 2
while True:
if is_prime(__lowerCAmelCase ):
yield num
num += 1
def A__( __lowerCAmelCase = 1_00_01 ):
return next(itertools.islice(prime_generator() , nth - 1 , __lowerCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
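    # Hedged cross-check with a tiny trial-division sieve (standalone; the
    # bound 1_05 is enough to cover the first 25 primes):
    sieve = [n for n in range(2, 1_05) if all(n % d for d in range(2, n))]
    print(sieve[24])  # 25th prime -> 97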
| 652 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : Any = (DEISMultistepScheduler,)
_UpperCamelCase : List[str] = (("num_inference_steps", 25),)
def __UpperCAmelCase ( self : Union[str, Any] , **lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = {
'num_train_timesteps': 10_00,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
}
config.update(**lowerCamelCase_ )
return config
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Optional[int]=0 , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : Any = kwargs.pop('num_inference_steps' , lowerCamelCase_ )
_snake_case : int = self.dummy_sample
_snake_case : Optional[Any] = 0.1 * sample
_snake_case : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_snake_case : Optional[Any] = self.get_scheduler_config(**lowerCamelCase_ )
_snake_case : List[Any] = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals
_snake_case : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_ )
_snake_case : Optional[Any] = scheduler_class.from_pretrained(lowerCamelCase_ )
new_scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals
_snake_case : Any = dummy_past_residuals[: new_scheduler.config.solver_order]
_snake_case : List[Any] = sample, sample
for t in range(lowerCamelCase_ , time_step + scheduler.config.solver_order + 1 ):
_snake_case : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
_snake_case : int = new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Optional[int]=0 , **lowerCamelCase_ : str ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : Any = kwargs.pop('num_inference_steps' , lowerCamelCase_ )
_snake_case : str = self.dummy_sample
_snake_case : str = 0.1 * sample
_snake_case : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_snake_case : Optional[Any] = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_ )
_snake_case : Any = scheduler_class.from_pretrained(lowerCamelCase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residual (must be after setting timesteps)
_snake_case : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_snake_case : Tuple = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
_snake_case : Tuple = new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self : str , lowerCamelCase_ : List[str]=None , **lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
if scheduler is None:
_snake_case : str = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(**lowerCamelCase_ )
_snake_case : Optional[int] = scheduler_class(**lowerCamelCase_ )
_snake_case : Optional[Any] = self.scheduler_classes[0]
_snake_case : str = self.get_scheduler_config(**lowerCamelCase_ )
_snake_case : int = scheduler_class(**lowerCamelCase_ )
_snake_case : str = 10
_snake_case : Tuple = self.dummy_model()
_snake_case : int = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Any = model(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
return sample
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : Any = dict(self.forward_default_kwargs )
_snake_case : List[str] = kwargs.pop('num_inference_steps' , lowerCamelCase_ )
for scheduler_class in self.scheduler_classes:
_snake_case : Optional[int] = self.get_scheduler_config()
_snake_case : Optional[int] = scheduler_class(**lowerCamelCase_ )
_snake_case : str = self.dummy_sample
_snake_case : Optional[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase_ , 'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase_ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase_ , 'set_timesteps' ):
_snake_case : Optional[int] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
_snake_case : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
_snake_case : Dict = scheduler.timesteps[5]
_snake_case : Tuple = scheduler.timesteps[6]
_snake_case : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
_snake_case : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = DEISMultistepScheduler(**self.get_scheduler_config() )
_snake_case : Union[str, Any] = self.full_loop(scheduler=lowerCamelCase_ )
_snake_case : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1e-3
_snake_case : Optional[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_snake_case : str = DPMSolverMultistepScheduler.from_config(scheduler.config )
_snake_case : Optional[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
_snake_case : Optional[Any] = DEISMultistepScheduler.from_config(scheduler.config )
_snake_case : Tuple = self.full_loop(scheduler=lowerCamelCase_ )
_snake_case : str = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1e-3
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
self.check_over_configs(thresholding=lowerCamelCase_ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase_ , prediction_type=lowerCamelCase_ , sample_max_value=lowerCamelCase_ , algorithm_type='deis' , solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , )
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , prediction_type=lowerCamelCase_ , algorithm_type=lowerCamelCase_ , )
_snake_case : List[str] = self.full_loop(
solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , prediction_type=lowerCamelCase_ , algorithm_type=lowerCamelCase_ , )
assert not torch.isnan(lowerCamelCase_ ).any(), "Samples have nan numbers"
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
self.check_over_configs(lower_order_final=lowerCamelCase_ )
self.check_over_configs(lower_order_final=lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=lowerCamelCase_ , time_step=0 )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : str = self.full_loop()
_snake_case : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1e-3
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : int = self.full_loop(prediction_type='v_prediction' )
_snake_case : int = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.scheduler_classes[0]
_snake_case : Optional[Any] = self.get_scheduler_config(thresholding=lowerCamelCase_ , dynamic_thresholding_ratio=0 )
_snake_case : Tuple = scheduler_class(**lowerCamelCase_ )
_snake_case : List[Any] = 10
_snake_case : Optional[int] = self.dummy_model()
_snake_case : Any = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : List[Any] = model(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
assert sample.dtype == torch.floataa
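# Hedged standalone sketch (relies on the upstream `diffusers` package; the
# step counts are illustrative): build the scheduler directly and inspect the
# discretisation it produces.
from diffusers import DEISMultistepScheduler as _DemoDEIS

demo_scheduler = _DemoDEIS(num_train_timesteps=10_00)
demo_scheduler.set_timesteps(10)
print(demo_scheduler.timesteps)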
| 711 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
lowercase_ : Dict = '''bert-base-cased'''
lowercase_ : Any = '''google/pegasus-xsum'''
lowercase_ : str = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
lowercase_ : Tuple = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
lowercase_ : Any = '''patrickvonplaten/t5-tiny-random'''
lowercase_ : List[Any] = '''sshleifer/bart-tiny-random'''
lowercase_ : Dict = '''sshleifer/tiny-mbart'''
lowercase_ : str = '''sshleifer/tiny-marian-en-de'''
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : str = '\n'.join(__lowerCAmelCase )
Path(__lowerCAmelCase ).open('w' ).writelines(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(__lowerCAmelCase , F'''{split}.source''' ) , __lowerCAmelCase )
_dump_articles(os.path.join(__lowerCAmelCase , F'''{split}.target''' ) , __lowerCAmelCase )
return tmp_dir
class lowercase ( a_ ):
"""simple docstring"""
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
_snake_case : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_snake_case : Optional[int] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
_snake_case : Any = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
_snake_case : Dict = 4
_snake_case : Any = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_snake_case , _snake_case : Optional[Any] = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
_snake_case : int = SeqaSeqDataset(
lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , src_lang=lowerCamelCase_ , tgt_lang=lowerCamelCase_ , )
_snake_case : List[str] = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_snake_case : List[Any] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str ):
'''simple docstring'''
_snake_case : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
_snake_case : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_snake_case : Dict = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
_snake_case : Union[str, Any] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
_snake_case : Union[str, Any] = 4
_snake_case : Optional[int] = LegacySeqaSeqDataset(
lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=20 , max_target_length=lowerCamelCase_ , )
_snake_case : Dict = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : int = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
_snake_case : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_snake_case : Any = tmp_dir.joinpath('train.source' ).open().readlines()
_snake_case : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(lowerCamelCase_ , lowerCamelCase_ , 1_28 , lowerCamelCase_ )
_snake_case : Tuple = {x.name for x in tmp_dir.iterdir()}
_snake_case : Dict = {x.name for x in save_dir.iterdir()}
_snake_case : str = save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(lowerCamelCase_ ) < len(lowerCamelCase_ )
assert len(lowerCamelCase_ ) == 1
assert len(packed_examples[0] ) == sum(len(lowerCamelCase_ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
_snake_case , _snake_case , _snake_case : int = self._get_dataset(max_len=64 )
_snake_case : List[str] = 64
_snake_case : str = ds.make_dynamic_sampler(lowerCamelCase_ , required_batch_size_multiple=lowerCamelCase_ )
_snake_case : Optional[Any] = [len(lowerCamelCase_ ) for x in batch_sampler]
assert len(set(lowerCamelCase_ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(lowerCamelCase_ ) == len(lowerCamelCase_ ) # no dropped or added examples
_snake_case : Union[str, Any] = DataLoader(lowerCamelCase_ , batch_sampler=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
_snake_case : List[Any] = []
_snake_case : List[Any] = []
for batch in data_loader:
_snake_case : Any = batch['input_ids'].shape
_snake_case : str = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_snake_case : int = np.product(batch['input_ids'].shape )
num_src_per_batch.append(lowerCamelCase_ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(lowerCamelCase_ )
assert num_src_per_batch[0] == max(lowerCamelCase_ )
if failures:
raise AssertionError(f'''too many tokens in {len(lowerCamelCase_ )} batches''' )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case : str = self._get_dataset(max_len=5_12 )
_snake_case : Optional[Any] = 2
_snake_case : Dict = ds.make_sortish_sampler(lowerCamelCase_ , shuffle=lowerCamelCase_ )
_snake_case : int = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
_snake_case : str = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowerCamelCase_ )
_snake_case : Tuple = tokenizer.pad_token_id
def count_pad_tokens(lowerCamelCase_ : List[str] , lowerCamelCase_ : Any="input_ids" ):
return [batch[k].eq(lowerCamelCase_ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) ) < sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) )
assert sum(count_pad_tokens(lowerCamelCase_ ) ) < sum(count_pad_tokens(lowerCamelCase_ ) )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Tuple=10_00 , lowerCamelCase_ : Tuple=1_28 ):
'''simple docstring'''
if os.getenv('USE_REAL_DATA' , lowerCamelCase_ ):
_snake_case : Dict = 'examples/seq2seq/wmt_en_ro'
_snake_case : List[Any] = max_len * 2 * 64
if not Path(lowerCamelCase_ ).joinpath('train.len' ).exists():
save_len_file(lowerCamelCase_ , lowerCamelCase_ )
else:
_snake_case : Union[str, Any] = 'examples/seq2seq/test_data/wmt_en_ro'
_snake_case : List[Any] = max_len * 4
save_len_file(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
_snake_case : str = SeqaSeqDataset(
lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , n_obs=lowerCamelCase_ , )
return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path="train", max_source_length=4, max_target_length=8, src_lang="EN", tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path="train", max_source_length=4, max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 652 | 0 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    # Doolittle LU decomposition: requires a square matrix whose leading
    # principal minors are all non-zero (no pivoting is performed).
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
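    # Quick sanity check with illustrative values: a successful decomposition
    # must reconstruct the input, i.e. L @ U == A.
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower_factor, upper_factor = lower_upper_decomposition(matrix)
    assert np.allclose(lower_factor @ upper_factor, matrix)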
| 712 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    # Greedy strategy: take items in decreasing value/weight ratio, splitting
    # the first item that no longer fits entirely.
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
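    # Illustrative run: values [60, 100, 120], weights [10, 20, 30] and capacity 50
    # give the classic optimum of 240 (items 0 and 1 in full, two thirds of item 2).
    total, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert total == 240.0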
| 652 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
        # Model for Image-classification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeqaSeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeqaSeqLM = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVisionaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVisionaSeq = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeqaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeqaSeq = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 713 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 652 | 0 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
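# Usage sketch: instantiating the deprecated class emits the FutureWarning above and
# otherwise behaves exactly like CLIPImageProcessor.
# feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")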
| 714 |
import math
def is_prime(number: int) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(F'''{solution() = }''')
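    # Illustrative spot checks: the 1st prime is 2 and the 6th is 13.
    assert solution(1) == 2
    assert solution(6) == 13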
| 652 | 0 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
def __init__( self : int , lowerCamelCase_ : int , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : Optional[str] = "relu" , **lowerCamelCase_ : Dict , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_snake_case : List[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_snake_case : Tuple = tf.keras.layers.ConvaD(
filters=lowerCamelCase_ , kernel_size=lowerCamelCase_ , strides=lowerCamelCase_ , padding='VALID' , groups=lowerCamelCase_ , use_bias=lowerCamelCase_ , name='convolution' , )
_snake_case : Optional[Any] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
_snake_case : Dict = ACTaFN[activation] if activation is not None else tf.identity
def __UpperCAmelCase ( self : str , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.convolution(self.padding(lowerCamelCase_ ) )
_snake_case : Optional[int] = self.normalization(lowerCamelCase_ )
_snake_case : Tuple = self.activation(lowerCamelCase_ )
return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
def __init__( self : str , lowerCamelCase_ : RegNetConfig , **lowerCamelCase_ : List[Any] ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
_snake_case : str = config.num_channels
_snake_case : Tuple = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
_snake_case : int = shape_list(lowerCamelCase_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_snake_case : int = tf.transpose(lowerCamelCase_ , perm=(0, 2, 3, 1) )
_snake_case : Optional[int] = self.embedder(lowerCamelCase_ )
return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
def __init__( self : int , lowerCamelCase_ : int , lowerCamelCase_ : int = 2 , **lowerCamelCase_ : Dict ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
_snake_case : int = tf.keras.layers.ConvaD(
filters=lowerCamelCase_ , kernel_size=1 , strides=lowerCamelCase_ , use_bias=lowerCamelCase_ , name='convolution' )
_snake_case : Tuple = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : tf.Tensor , lowerCamelCase_ : bool = False ):
'''simple docstring'''
return self.normalization(self.convolution(lowerCamelCase_ ) , training=lowerCamelCase_ )
class TFRegNetSELayer(tf.keras.layers.Layer):
def __init__( self : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : int , **lowerCamelCase_ : List[Any] ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
_snake_case : Optional[int] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase_ , name='pooler' )
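        # Squeeze-and-excitation: the pooled (batch, 1, 1, channels) descriptor goes
        # through a two-layer 1x1-convolution bottleneck to produce per-channel gates.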
_snake_case : int = [
tf.keras.layers.ConvaD(filters=lowerCamelCase_ , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=lowerCamelCase_ , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def __UpperCAmelCase ( self : int , lowerCamelCase_ : Dict ):
'''simple docstring'''
_snake_case : int = self.pooler(lowerCamelCase_ )
for layer_module in self.attention:
_snake_case : Dict = layer_module(lowerCamelCase_ )
_snake_case : Tuple = hidden_state * pooled
return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
def __init__( self : List[str] , lowerCamelCase_ : RegNetConfig , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int = 1 , **lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
_snake_case : List[Any] = in_channels != out_channels or stride != 1
_snake_case : Tuple = max(1 , out_channels // config.groups_width )
_snake_case : List[str] = (
TFRegNetShortCut(lowerCamelCase_ , stride=lowerCamelCase_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_snake_case : Optional[int] = [
TFRegNetConvLayer(lowerCamelCase_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowerCamelCase_ , stride=lowerCamelCase_ , groups=lowerCamelCase_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(lowerCamelCase_ , kernel_size=1 , activation=lowerCamelCase_ , name='layer.2' ),
]
_snake_case : int = ACTaFN[config.hidden_act]
def __UpperCAmelCase ( self : int , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : Dict = hidden_state
for layer_module in self.layers:
_snake_case : Union[str, Any] = layer_module(lowerCamelCase_ )
_snake_case : Optional[Any] = self.shortcut(lowerCamelCase_ )
hidden_state += residual
_snake_case : Union[str, Any] = self.activation(lowerCamelCase_ )
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
def __init__( self : Optional[int] , lowerCamelCase_ : RegNetConfig , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int = 1 , **lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
_snake_case : List[str] = in_channels != out_channels or stride != 1
_snake_case : Optional[int] = max(1 , out_channels // config.groups_width )
_snake_case : Optional[Any] = (
TFRegNetShortCut(lowerCamelCase_ , stride=lowerCamelCase_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
_snake_case : Union[str, Any] = [
TFRegNetConvLayer(lowerCamelCase_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowerCamelCase_ , stride=lowerCamelCase_ , groups=lowerCamelCase_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(lowerCamelCase_ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(lowerCamelCase_ , kernel_size=1 , activation=lowerCamelCase_ , name='layer.3' ),
]
_snake_case : List[Any] = ACTaFN[config.hidden_act]
def __UpperCAmelCase ( self : str , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
_snake_case : Optional[int] = hidden_state
for layer_module in self.layers:
_snake_case : str = layer_module(lowerCamelCase_ )
_snake_case : int = self.shortcut(lowerCamelCase_ )
hidden_state += residual
_snake_case : Union[str, Any] = self.activation(lowerCamelCase_ )
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
def __init__( self : List[Any] , lowerCamelCase_ : RegNetConfig , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int = 2 , lowerCamelCase_ : int = 2 , **lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
_snake_case : Tuple = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
_snake_case : Tuple = [
# downsampling is done in the first layer with stride of 2
layer(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ , name='layers.0' ),
*[layer(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
for layer_module in self.layers:
_snake_case : str = layer_module(lowerCamelCase_ )
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
def __init__( self : List[str] , lowerCamelCase_ : RegNetConfig , **lowerCamelCase_ : List[Any] ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
_snake_case : int = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowerCamelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
_snake_case : Optional[int] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCamelCase_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , depth=lowerCamelCase_ , name=f'''stages.{i+1}''' ) )
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : tf.Tensor , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = True ):
'''simple docstring'''
_snake_case : Optional[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case : Dict = hidden_states + (hidden_state,)
_snake_case : Tuple = stage_module(lowerCamelCase_ )
if output_hidden_states:
_snake_case : str = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase_ , hidden_states=lowerCamelCase_ )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig
def __init__( self : Dict , lowerCamelCase_ : Tuple , **lowerCamelCase_ : str ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
_snake_case : Optional[int] = config
_snake_case : Tuple = TFRegNetEmbeddings(lowerCamelCase_ , name='embedder' )
_snake_case : Optional[Any] = TFRegNetEncoder(lowerCamelCase_ , name='encoder' )
_snake_case : Union[str, Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase_ , name='pooler' )
@unpack_inputs
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : tf.Tensor , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
_snake_case : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case : List[str] = self.embedder(lowerCamelCase_ , training=lowerCamelCase_ )
_snake_case : Union[str, Any] = self.encoder(
lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ , training=lowerCamelCase_ )
_snake_case : Optional[int] = encoder_outputs[0]
_snake_case : List[Any] = self.pooler(lowerCamelCase_ )
        # Change to NCHW output format to have uniformity in the modules
_snake_case : int = tf.transpose(lowerCamelCase_ , perm=(0, 3, 1, 2) )
_snake_case : Dict = tf.transpose(lowerCamelCase_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_snake_case : str = tuple([tf.transpose(lowerCamelCase_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase_ , pooler_output=lowerCamelCase_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
    def input_signature(self):
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
REGNET_START_DOCSTRING = r'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , a_ , )
class TFRegNetModel(TFRegNetPreTrainedModel):
def __init__( self : Tuple , lowerCamelCase_ : RegNetConfig , *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
super().__init__(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
_snake_case : Any = TFRegNetMainLayer(lowerCamelCase_ , name='regnet' )
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : tf.Tensor , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[int]=False , ):
'''simple docstring'''
_snake_case : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case : int = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case : List[str] = self.regnet(
pixel_values=lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ , training=lowerCamelCase_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , a_ , )
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
def __init__( self : Dict , lowerCamelCase_ : RegNetConfig , *lowerCamelCase_ : Dict , **lowerCamelCase_ : int ):
'''simple docstring'''
super().__init__(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
_snake_case : Any = config.num_labels
_snake_case : Optional[int] = TFRegNetMainLayer(lowerCamelCase_ , name='regnet' )
# classification head
_snake_case : Any = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : tf.Tensor = None , lowerCamelCase_ : tf.Tensor = None , lowerCamelCase_ : bool = None , lowerCamelCase_ : bool = None , lowerCamelCase_ : Tuple=False , ):
'''simple docstring'''
_snake_case : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case : str = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case : Optional[Any] = self.regnet(
lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ , training=lowerCamelCase_ )
_snake_case : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
_snake_case : Tuple = self.classifier[0](lowerCamelCase_ )
_snake_case : Tuple = self.classifier[1](lowerCamelCase_ )
_snake_case : Dict = None if labels is None else self.hf_compute_loss(labels=lowerCamelCase_ , logits=lowerCamelCase_ )
if not return_dict:
_snake_case : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowerCamelCase_ , logits=lowerCamelCase_ , hidden_states=outputs.hidden_states )
| 715 |
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Find scores of each token being the start and end token of an entity."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # Encode the query and the support sets with the shared BERT encoder.
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
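# Minimal usage sketch (hypothetical tokenized batches): `W_query` and `W_supports`
# are tokenizer outputs, and `W_supports` additionally carries the "sizes",
# "start_token_id" and "end_token_id" tensors consumed in `forward` above.
# model = FSNERModel()
# p_starts, p_ends = model(W_query, W_supports)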
| 652 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Non-recursive segment tree over `arr`, combining elements with the
        associative function `fnc`."""
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set the element at position p to v and rebuild the path to the root."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Combine the elements in the inclusive range [l, r]."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        """Exhaustively compare every [i, j] query against a reduce() baseline."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
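    # Illustrative spot checks after the updates above: test_array now starts 7, 2, 6.
    assert min_segment_tree.query(0, 0) == 7
    assert sum_segment_tree.query(0, 2) == 7 + 2 + 6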
| 716 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
@register_to_config
    def __init__(self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = TaConfig(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)
        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)
    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
| 652 | 0 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    dataset_info_dir = str(tmp_path)
    dataset_info.write_to_directory(dataset_info_dir)
    reloaded = DatasetInfo.from_directory(dataset_info_dir)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(dataset_info_dir, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    dataset_infos_dir = str(tmp_path)
    dataset_infos_dict.write_to_directory(dataset_infos_dir)
    reloaded = DatasetInfosDict.from_directory(dataset_infos_dir)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(dataset_infos_dir, "README.md"))
| 717 |
def excel_title_to_column(column_title: str) -> int:
    # Interpret an Excel column title ("A", "AB", ...) as a bijective base-26 number.
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
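    # Illustrative checks: "A" -> 1, "AB" -> 26 * 1 + 2 = 28, "ZZ" -> 26 * 26 + 26 = 702.
    assert excel_title_to_column("A") == 1
    assert excel_title_to_column("AB") == 28
    assert excel_title_to_column("ZZ") == 702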
| 652 | 0 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
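# Usage sketch (hypothetical repo and file): resolves to a dataset-file URL of the form
# https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train.csv
# url = hf_hub_url("user/my_dataset", "data/train.csv", revision="main")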
| 718 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False, support_zh_ja_chars: bool = False, case_sensitive: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive)
        output = sb_ter.corpus_score(predictions, transformed_references)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 652 | 0 |
def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        # Compare and swap the pair (low, high), then (low+1, high-1), and so on,
        # then recurse into both halves; report whether any swap happened.
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
if __name__ == "__main__":
lowercase_ : List[Any] = input('''Enter numbers separated by a comma:\n''').strip()
lowercase_ : Any = [int(item) for item in user_input.split(''',''')]
print(circle_sort(unsorted))
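
# Illustrative behaviour:
#   circle_sort([0, 5, 3, 2, 2])  ->  [0, 2, 2, 3, 5]
#   circle_sort([-2, 5, 0, -45])  ->  [-45, -2, 0, 5]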
| 719 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force a complete match
    qts = tuple(re.compile(x + '$') for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp', None)),
        (("transformer", "wte", "embedding"), P('mp', None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, 'mp')),
        (("attention", "out_proj", "kernel"), P('mp', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, 'mp')),
        (("mlp", "c_fc", "bias"), P('mp')),
        (("mlp", "c_proj", "kernel"), P('mp', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
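
# Minimal usage sketch (illustrative; the checkpoint name is a placeholder and a
# GPT-J-style Flax parameter tree matching the rules above is assumed):
#
#   from transformers import FlaxGPTJForCausalLM
#   model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
#   param_specs = set_partitions(model.params)  # PartitionSpec pytree for pjit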
| 652 | 0 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : List[str] = "linear"
_UpperCamelCase : Optional[int] = "cosine"
_UpperCamelCase : Union[str, Any] = "cosine_with_restarts"
_UpperCamelCase : Tuple = "polynomial"
_UpperCamelCase : Union[str, Any] = "constant"
_UpperCamelCase : str = "constant_with_warmup"
_UpperCamelCase : Tuple = "piecewise_constant"
def A__( __lowerCAmelCase , __lowerCAmelCase = -1 ):
return LambdaLR(__lowerCAmelCase , lambda __lowerCAmelCase : 1 , last_epoch=__lowerCAmelCase )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = -1 ):
def lr_lambda(__lowerCAmelCase ):
if current_step < num_warmup_steps:
return float(__lowerCAmelCase ) / float(max(1.0 , __lowerCAmelCase ) )
return 1.0
return LambdaLR(__lowerCAmelCase , __lowerCAmelCase , last_epoch=__lowerCAmelCase )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = -1 ):
_snake_case : str = {}
_snake_case : Optional[int] = step_rules.split(',' )
for rule_str in rule_list[:-1]:
_snake_case : Tuple = rule_str.split(':' )
_snake_case : Union[str, Any] = int(__lowerCAmelCase )
_snake_case : Dict = float(__lowerCAmelCase )
_snake_case : Any = value
_snake_case : List[str] = float(rule_list[-1] )
def create_rules_function(__lowerCAmelCase , __lowerCAmelCase ):
def rule_func(__lowerCAmelCase ) -> float:
_snake_case : Tuple = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__lowerCAmelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
_snake_case : Any = create_rules_function(__lowerCAmelCase , __lowerCAmelCase )
return LambdaLR(__lowerCAmelCase , __lowerCAmelCase , last_epoch=__lowerCAmelCase )
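
# Example `step_rules` string (format implied by the parsing above):
#   "10:1.0,20:0.5,0.05"
# keeps the learning-rate multiplier at 1.0 while step < 10, at 0.5 while
# 10 <= step < 20, and at 0.05 for every later step.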
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=-1 ):
def lr_lambda(__lowerCAmelCase ):
if current_step < num_warmup_steps:
return float(__lowerCAmelCase ) / float(max(1 , __lowerCAmelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.5 , __lowerCAmelCase = -1 ):
def lr_lambda(__lowerCAmelCase ):
if current_step < num_warmup_steps:
return float(__lowerCAmelCase ) / float(max(1 , __lowerCAmelCase ) )
_snake_case : Any = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__lowerCAmelCase ) * 2.0 * progress )) )
return LambdaLR(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1 , __lowerCAmelCase = -1 ):
def lr_lambda(__lowerCAmelCase ):
if current_step < num_warmup_steps:
return float(__lowerCAmelCase ) / float(max(1 , __lowerCAmelCase ) )
_snake_case : int = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__lowerCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=1E-7 , __lowerCAmelCase=1.0 , __lowerCAmelCase=-1 ):
_snake_case : List[Any] = optimizer.defaults['lr']
if not (lr_init > lr_end):
        raise ValueError(F'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''' )
def lr_lambda(__lowerCAmelCase ):
if current_step < num_warmup_steps:
return float(__lowerCAmelCase ) / float(max(1 , __lowerCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
_snake_case : List[Any] = lr_init - lr_end
_snake_case : Dict = num_training_steps - num_warmup_steps
_snake_case : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
_snake_case : str = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase_ : Optional[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 1 , __lowerCAmelCase = 1.0 , __lowerCAmelCase = -1 , ):
_snake_case : List[str] = SchedulerType(__lowerCAmelCase )
_snake_case : Tuple = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__lowerCAmelCase , last_epoch=__lowerCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__lowerCAmelCase , step_rules=__lowerCAmelCase , last_epoch=__lowerCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__lowerCAmelCase , num_warmup_steps=__lowerCAmelCase , last_epoch=__lowerCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__lowerCAmelCase , num_warmup_steps=__lowerCAmelCase , num_training_steps=__lowerCAmelCase , num_cycles=__lowerCAmelCase , last_epoch=__lowerCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__lowerCAmelCase , num_warmup_steps=__lowerCAmelCase , num_training_steps=__lowerCAmelCase , power=__lowerCAmelCase , last_epoch=__lowerCAmelCase , )
return schedule_func(
__lowerCAmelCase , num_warmup_steps=__lowerCAmelCase , num_training_steps=__lowerCAmelCase , last_epoch=__lowerCAmelCase )
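
# Minimal usage sketch (illustrative; `get_linear_schedule_with_warmup` is the
# upstream name that TYPE_TO_SCHEDULER_FUNCTION maps SchedulerType.LINEAR to):
#
#   import torch
#   model = torch.nn.Linear(4, 4)  # placeholder model
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#   scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=100, num_training_steps=1000)
#   for _ in range(1000):
#       optimizer.step()
#       scheduler.step()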
| 720 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowercase_ : Any = logging.getLogger(__name__)
def save_model(model, dirpath):
    # Save the model, first removing any stale config/weights already in dirpath
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, 'config.json')) and os.path.isfile(
            os.path.join(dirpath, 'config.json')):
            os.remove(os.path.join(dirpath, 'config.json'))
        if os.path.exists(os.path.join(dirpath, 'pytorch_model.bin')) and os.path.isfile(
            os.path.join(dirpath, 'pytorch_model.bin')):
            os.remove(os.path.join(dirpath, 'pytorch_model.bin'))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution, -sum(p * log(p))."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Log a 2D tensor, one row (layer) per line."""
    logger.info('lv, h >\t' + '\t'.join(f'{x + 1}' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:.5f}' for x in tensor[row].cpu().data))
        else:
            logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:d}' for x in tensor[row].cpu().data))
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=False ):
_snake_case , _snake_case : List[str] = model.config.num_hidden_layers, model.config.num_attention_heads
_snake_case : Optional[int] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
_snake_case : Union[str, Any] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
if head_mask is None:
_snake_case : List[str] = torch.ones(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=__lowerCAmelCase )
    # If the attention heads have actually been pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
_snake_case : List[str] = None
_snake_case : str = 0.0
_snake_case : List[str] = 0.0
for step, inputs in enumerate(tqdm(__lowerCAmelCase , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
_snake_case : Dict = tuple(t.to(args.device ) for t in inputs )
((_snake_case) , ) : int = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_snake_case : List[str] = model(__lowerCAmelCase , labels=__lowerCAmelCase , head_mask=__lowerCAmelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_snake_case , _snake_case , _snake_case : Dict = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__lowerCAmelCase ):
_snake_case : int = entropy(attn.detach() , __lowerCAmelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__lowerCAmelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_snake_case : List[str] = 2
_snake_case : Any = torch.pow(torch.pow(__lowerCAmelCase , __lowerCAmelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
_snake_case : Union[str, Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(__lowerCAmelCase )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(__lowerCAmelCase )
logger.info('Head ranked by importance scores' )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
print_ad_tensor(__lowerCAmelCase )
return attn_entropy, head_importance, total_loss
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case , _snake_case , _snake_case : int = compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase )
    _snake_case : List[str] = 1 / loss  # instead of a downstream score, use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , __lowerCAmelCase , original_score * args.masking_threshold )
_snake_case : Optional[Any] = torch.ones_like(__lowerCAmelCase )
_snake_case : Tuple = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_snake_case : Dict = original_score
while current_score >= original_score * args.masking_threshold:
_snake_case : List[str] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf' )  # exclude already-masked heads from the ranking
_snake_case : Optional[int] = head_importance.view(-1 ).sort()[1]
if len(__lowerCAmelCase ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
_snake_case : List[Any] = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
_snake_case : Tuple = new_head_mask.view(-1 )
_snake_case : List[str] = 0.0
_snake_case : int = new_head_mask.view_as(__lowerCAmelCase )
_snake_case : Optional[Any] = new_head_mask.clone().detach()
print_ad_tensor(__lowerCAmelCase )
# Compute metric and head importance again
_snake_case , _snake_case , _snake_case : List[str] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_snake_case : Union[str, Any] = 1 / loss
logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percent)' , __lowerCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('Final head mask' )
print_ad_tensor(__lowerCAmelCase )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[Any] = datetime.now()
_snake_case , _snake_case , _snake_case : List[Any] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_snake_case : str = 1 / loss
_snake_case : Optional[int] = datetime.now() - before_time
_snake_case : Optional[int] = sum(p.numel() for p in model.parameters() )
_snake_case : Optional[Any] = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowerCAmelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[int] = [
v,
]
assert sum(len(__lowerCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__lowerCAmelCase )
_snake_case : Optional[Any] = sum(p.numel() for p in model.parameters() )
_snake_case : List[str] = datetime.now()
_snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase , actually_pruned=__lowerCAmelCase , )
_snake_case : Dict = 1 / loss
_snake_case : str = datetime.now() - before_time
logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)' , __lowerCAmelCase , __lowerCAmelCase , pruned_num_params / original_num_params * 1_00 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , __lowerCAmelCase , __lowerCAmelCase )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percent' , original_time / new_time * 1_00 )
save_model(__lowerCAmelCase , args.output_dir )
def A__( ):
_snake_case : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=__lowerCAmelCase , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=__lowerCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=__lowerCAmelCase , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
        '--masking_threshold' , default=0.9 , type=__lowerCAmelCase , help='masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
        '--masking_amount' , default=0.1 , type=__lowerCAmelCase , help='Fraction of heads to mask at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=__lowerCAmelCase , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_28 , type=__lowerCAmelCase , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=__lowerCAmelCase , help='Batch size.' )
parser.add_argument('--seed' , type=__lowerCAmelCase , default=42 )
parser.add_argument('--local_rank' , type=__lowerCAmelCase , default=-1 , help='local_rank for distributed training on gpus' )
    parser.add_argument('--no_cuda' , action='store_true' , help='Whether to avoid using CUDA when it is available' )
parser.add_argument('--server_ip' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
_snake_case : List[str] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCAmelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_snake_case : Optional[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
_snake_case : Any = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_snake_case : Union[str, Any] = torch.device('cuda' , args.local_rank )
_snake_case : Dict = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_snake_case : Any = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_snake_case : List[str] = nn.parallel.DistributedDataParallel(
__lowerCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCAmelCase )
elif args.n_gpu > 1:
_snake_case : int = nn.DataParallel(__lowerCAmelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__lowerCAmelCase )
torch.save(__lowerCAmelCase , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , __lowerCAmelCase )
# Prepare dataset
_snake_case : str = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_snake_case : Dict = (torch.from_numpy(__lowerCAmelCase ),)
_snake_case : List[Any] = TensorDataset(*__lowerCAmelCase )
_snake_case : List[str] = RandomSampler(__lowerCAmelCase )
_snake_case : List[str] = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_snake_case : Union[str, Any] = mask_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
prune_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
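
# Example invocation (illustrative only; the script and file names below are
# placeholders, not defined by this code):
#
#   python gpt2_head_pruning.py \
#       --data_dir ./token_ids.txt \
#       --model_name_or_path gpt2 \
#       --output_dir ./bertology_out \
#       --try_masking --masking_threshold 0.9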
| 652 | 0 |
def _modexpt(base, exponent, modulo_value):
    """Compute (base ** exponent) % modulo_value by recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base=1777, height=1855, digits=8):
    """Project Euler 188: the last `digits` digits of the tetration base^^height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
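
# solution() evaluates the power tower base**base**...**base of height `height`
# (tetration) modulo 10**digits, iterating result <- base**result (mod 10**digits).
# Small check: solution(3, 3, 8) == 97484987, the last 8 digits of 3**27 = 7625597484987.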
| 721 |
def A__(number):
    """Return the largest number obtainable by removing exactly one digit."""
    if not isinstance(number, int):
        raise TypeError('only integers accepted as input')
    else:
        num_string = str(abs(number))
        num_transpositions = [list(num_string) for _ in range(len(num_string))]
        for index in range(len(num_string)):
            num_transpositions[index].pop(index)
        return max(
            int(''.join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__('''doctest''').testmod()
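
# Illustrative behaviour: each candidate drops one digit and the maximum wins,
# e.g. for 2736 the candidates are 736, 236, 276 and 273, so the result is 736.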
| 652 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : List[str] = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = "xmod"
def __init__( self : Tuple , lowerCamelCase_ : Dict=3_05_22 , lowerCamelCase_ : Optional[int]=7_68 , lowerCamelCase_ : Any=12 , lowerCamelCase_ : Optional[Any]=12 , lowerCamelCase_ : Optional[int]=30_72 , lowerCamelCase_ : Dict="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Optional[int]=5_12 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : str=0.02 , lowerCamelCase_ : Optional[Any]=1e-12 , lowerCamelCase_ : str=1 , lowerCamelCase_ : Tuple=0 , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : Optional[int]="absolute" , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : int=False , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : Dict=False , lowerCamelCase_ : Tuple=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : Dict=("en_XX",) , lowerCamelCase_ : List[str]=None , **lowerCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
_snake_case : int = vocab_size
_snake_case : Optional[int] = hidden_size
_snake_case : List[Any] = num_hidden_layers
_snake_case : int = num_attention_heads
_snake_case : List[Any] = hidden_act
_snake_case : List[str] = intermediate_size
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Dict = attention_probs_dropout_prob
_snake_case : List[Any] = max_position_embeddings
_snake_case : List[str] = type_vocab_size
_snake_case : Tuple = initializer_range
_snake_case : Optional[Any] = layer_norm_eps
_snake_case : int = position_embedding_type
_snake_case : Optional[int] = use_cache
_snake_case : Any = classifier_dropout
_snake_case : int = pre_norm
_snake_case : Optional[int] = adapter_reduction_factor
_snake_case : Dict = adapter_layer_norm
_snake_case : Optional[Any] = adapter_reuse_layer_norm
_snake_case : List[str] = ln_before_adapter
_snake_case : List[str] = list(lowerCamelCase_ )
_snake_case : Optional[int] = default_language
class lowercase ( a_ ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
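
# Note on the adapter fields above: X-MOD adds per-language adapter modules to
# each transformer layer; `adapter_reduction_factor` sets the adapter bottleneck
# width to hidden_size / adapter_reduction_factor (768 / 2 = 384 with the
# defaults above), and `languages` / `default_language` select which adapter is
# active.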
| 700 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowercase_ : Tuple = logging.getLogger(__name__)
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Dict=-1 ):
'''simple docstring'''
_snake_case : str = label_idx
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : Union[str, Any] = mode.value
_snake_case : Optional[int] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Dict = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
_snake_case : List[Any] = []
_snake_case : int = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
_snake_case : Optional[Any] = []
_snake_case : Union[str, Any] = []
else:
_snake_case : Tuple = line.split(' ' )
words.append(splits[0] )
if len(lowerCamelCase_ ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
return examples
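
    # Expected input format (CoNLL-style, illustrative):
    #
    #   -DOCSTART- -X- O O
    #
    #   EU NNP B-NP B-ORG
    #   rejects VBZ B-VP O
    #
    # Blank lines (or -DOCSTART- markers) separate sentences; column 0 holds the
    # token and column `label_idx` (the last column here) holds the tag.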
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
_snake_case : str = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(lowerCamelCase_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_snake_case : List[str] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(lowerCamelCase_ )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : Optional[int] = f.read().splitlines()
if "O" not in labels:
_snake_case : Optional[int] = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Optional[int] ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : str = f.read().splitlines()
if "O" not in labels:
_snake_case : Union[str, Any] = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class lowercase ( a_ ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : str = mode.value
_snake_case : List[str] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Tuple = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : List[str] = []
_snake_case : str = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
return examples
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
_snake_case : Dict = 0
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : Optional[int] = preds_list[example_id]
_snake_case : List[Any] = ''
for token in sentence:
                out += f'''{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(lowerCamelCase_ )
example_id += 1
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 652 | 0 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowercase :
"""simple docstring"""
def __init__( self : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int]=13 , lowerCamelCase_ : Dict=7 , lowerCamelCase_ : str=6 , lowerCamelCase_ : Union[str, Any]=17 , lowerCamelCase_ : Union[str, Any]=23 , lowerCamelCase_ : Tuple=11 , lowerCamelCase_ : Optional[Any]=True , ):
'''simple docstring'''
_snake_case : List[Any] = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : int = seq_length
_snake_case : Dict = act_dim
_snake_case : Union[str, Any] = state_dim
_snake_case : Optional[int] = hidden_size
_snake_case : List[Any] = max_length
_snake_case : Union[str, Any] = is_training
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
_snake_case : Any = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
_snake_case : List[Any] = floats_tensor((self.batch_size, self.seq_length, 1) )
_snake_case : Any = floats_tensor((self.batch_size, self.seq_length, 1) )
_snake_case : List[str] = ids_tensor((self.batch_size, self.seq_length) , vocab_size=10_00 )
_snake_case : int = random_attention_mask((self.batch_size, self.seq_length) )
_snake_case : Union[str, Any] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] , ):
'''simple docstring'''
_snake_case : str = DecisionTransformerModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_snake_case : str = model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length * 3 as there are 3 modalities: states, returns and actions
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
_snake_case : List[Any] = {
'states': states,
'actions': actions,
'rewards': rewards,
'returns_to_go': returns_to_go,
'timesteps': timesteps,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_torch
class lowercase ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : Any = (DecisionTransformerModel,) if is_torch_available() else ()
_UpperCamelCase : Union[str, Any] = ()
_UpperCamelCase : Optional[int] = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
_UpperCamelCase : int = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
_UpperCamelCase : Dict = False
_UpperCamelCase : str = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : str = False
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : str = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : List[Any] = False
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = DecisionTransformerModelTester(self )
_snake_case : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@slow
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Dict = DecisionTransformerModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[Any] = model_class(lowerCamelCase_ )
_snake_case : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Optional[Any] = [*signature.parameters.keys()]
_snake_case : Optional[Any] = [
'states',
'actions',
'rewards',
'returns_to_go',
'timesteps',
'attention_mask',
]
self.assertListEqual(arg_names[: len(lowerCamelCase_ )] , lowerCamelCase_ )
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = 2 # number of steps of autoregressive prediction we will perform
_snake_case : str = 10 # defined by the RL environment, may be normalized
_snake_case : Optional[Any] = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' )
_snake_case : int = model.to(lowerCamelCase_ )
_snake_case : Any = model.config
torch.manual_seed(0 )
_snake_case : str = torch.randn(1 , 1 , config.state_dim ).to(device=lowerCamelCase_ , dtype=torch.floataa ) # env.reset()
_snake_case : int = torch.tensor(
[[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=lowerCamelCase_ )
_snake_case : Optional[int] = torch.tensor(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
_snake_case : Union[str, Any] = state
_snake_case : int = torch.zeros(1 , 0 , config.act_dim , device=lowerCamelCase_ , dtype=torch.floataa )
_snake_case : int = torch.zeros(1 , 0 , device=lowerCamelCase_ , dtype=torch.floataa )
_snake_case : Optional[int] = torch.tensor(0 , device=lowerCamelCase_ , dtype=torch.long ).reshape(1 , 1 )
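        # Autoregressive rollout: each step appends zero placeholders for the next
        # action and reward, re-runs the model on the full history, and reads the
        # newly predicted action from the last position of action_pred.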
for step in range(lowerCamelCase_ ):
_snake_case : int = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=lowerCamelCase_ )] , dim=1 )
_snake_case : Any = torch.cat([rewards, torch.zeros(1 , 1 , device=lowerCamelCase_ )] , dim=1 )
_snake_case : int = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
_snake_case : Optional[Any] = model(
states=lowerCamelCase_ , actions=lowerCamelCase_ , rewards=lowerCamelCase_ , returns_to_go=lowerCamelCase_ , timesteps=lowerCamelCase_ , attention_mask=lowerCamelCase_ , return_dict=lowerCamelCase_ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
_snake_case : Union[str, Any] = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=lowerCamelCase_ , dtype=torch.floataa ),
1.0,
False,
{},
)
_snake_case : Union[str, Any] = action_pred[0, -1]
_snake_case : List[Any] = torch.cat([states, state] , dim=1 )
_snake_case : Dict = returns_to_go[0, -1] - reward
_snake_case : str = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
_snake_case : Union[str, Any] = torch.cat(
[timesteps, torch.ones((1, 1) , device=lowerCamelCase_ , dtype=torch.long ) * (step + 1)] , dim=1 )
| 701 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = 1
_snake_case : str = 3
_snake_case : List[str] = (32, 32)
_snake_case : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(lowerCamelCase_ )
@property
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
def extract(*lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : str ):
class lowercase :
"""simple docstring"""
def __init__( self : Tuple ):
'''simple docstring'''
_snake_case : List[str] = torch.ones([0] )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : Tuple ):
'''simple docstring'''
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : int = self.dummy_cond_unet
_snake_case : str = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
_snake_case : Union[str, Any] = self.dummy_vae
_snake_case : Optional[Any] = self.dummy_text_encoder
_snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # assemble the pipeline (this test uses the DDIM scheduler)
_snake_case : Union[str, Any] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[str] = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_snake_case : Union[str, Any] = output.images
_snake_case : List[str] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_snake_case : Tuple = image[0, -3:, -3:, -1]
_snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : List[str] = self.dummy_cond_unet
_snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_snake_case : int = self.dummy_vae
_snake_case : List[Any] = self.dummy_text_encoder
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
_snake_case : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : Union[str, Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : str = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Tuple = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_snake_case : Optional[Any] = output.images
_snake_case : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert isinstance(pipe.scheduler , lowerCamelCase_ )
assert pipe.safety_checker is None
_snake_case : Dict = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
_snake_case : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_snake_case : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.dummy_cond_unet
_snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_snake_case : Any = self.dummy_vae
_snake_case : Optional[Any] = self.dummy_text_encoder
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
_snake_case : str = unet.half()
_snake_case : Union[str, Any] = vae.half()
_snake_case : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_snake_case : List[str] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Tuple = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Optional[int] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
_snake_case : List[str] = 40_03_66_03_46
_snake_case : int = 7
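        # sld_guidance_scale=0 disables Safe Latent Diffusion guidance entirely; the
        # second call below enables it with a strong configuration (guidance scale,
        # warmup steps, threshold and momentum).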
# without safety guidance (sld_guidance_scale = 0)
_snake_case : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Union[str, Any] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : str = output.images
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
_snake_case : Tuple = torch.manual_seed(lowerCamelCase_ )
_snake_case : int = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : Tuple = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : str = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Union[str, Any] = 'padme amidala taking a bath artwork, safe for work, no nudity'
_snake_case : Optional[Any] = 27_34_97_17_55
_snake_case : Union[str, Any] = 7
_snake_case : Dict = torch.manual_seed(lowerCamelCase_ )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Any = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
_snake_case : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
_snake_case : Optional[int] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[Any] = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
_snake_case : Union[str, Any] = 10_44_35_52_34
_snake_case : Dict = 12
_snake_case : Optional[int] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Optional[int] = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
_snake_case : List[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Optional[int] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 652 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase_ : Optional[int] = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 702 |
import functools
def A__( days , costs ):
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 3_66:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index ) -> int:
        # Minimum cost to cover all travel days from `index` onwards.
        if index > 3_65:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
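    # Hedged usage example (not part of the original file): with 1-, 7- and
    # 30-day passes costing 2, 7 and 15, travelling on days 1, 4, 6, 7, 8 and 20
    # costs 11 (1-day pass on day 1, 7-day pass covering days 4-10, 1-day on day 20).
    print(A__([1, 4, 6, 7, 8, 20] , [2, 7, 15] ))  # -> 11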
| 652 | 0 |
import math
def proth( number ):
    if not isinstance(number , int ):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers (3, 5, 9, 13, 17, 25, ...) are built block by block,
        # doubling the number of new entries with each block.
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(F'''ValueError: there is no {number}th Proth number''')
            continue
        print(F'''The {number}th Proth number: {value}''')
| 703 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase_ : str = logging.get_logger(__name__)
class lowercase ( SegformerImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 652 | 0 |
def A__( equation_a , equation_b ):
    # Check if the input is valid
    if not len(equation_a ) == len(equation_b ) == 3:
        raise ValueError('Please enter a valid equation.' )
    if equation_a[0] == equation_a[1] == equation_b[0] == equation_b[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.' )
    # Extract the coefficients
    aa, ba, ca = equation_a
    ab, bb, cb = equation_b
    # Calculate the determinants of the matrices
    determinant = aa * bb - ab * ba
    determinant_x = ca * bb - cb * ba
    determinant_y = aa * cb - ab * ca
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)' )
        else:
            raise ValueError('No solution. (Inconsistent system)' )
    else:
        if determinant_x == determinant_y == 0:
            # Unique trivial solution x = y = 0 (consistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-trivial unique solution (consistent system)
            return (x, y)
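# Hedged usage sketch (not part of the original file; the sample system is
# chosen for illustration): x + 2y = 7 and 2x - y = 4 meet at x = 3, y = 2.
if __name__ == "__main__":
    print(A__((1, 2, 7) , (2, -1, 4) ))  # -> (3.0, 2.0)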
| 704 |
from math import factorial
def binomial_distribution( successes , trials , prob ):
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials' )
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers' )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError('the function is defined for non-negative integers' )
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 652 | 0 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple( x ):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_tf
class lowercase :
"""simple docstring"""
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : int ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any]=None , **lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : str = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : str = TFVisionTextDualEncoderModel(lowerCamelCase_ )
_snake_case : Union[str, Any] = model(input_ids=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any=None , **lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
_snake_case : Tuple = self.get_vision_text_model(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Optional[Any] = TFVisionTextDualEncoderModel(vision_model=lowerCamelCase_ , text_model=lowerCamelCase_ )
_snake_case : Optional[int] = model(input_ids=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str=None , **lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.get_vision_text_model(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Union[str, Any] = {'vision_model': vision_model, 'text_model': text_model}
_snake_case : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase_ )
_snake_case : Dict = model(input_ids=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int=None , **lowerCamelCase_ : List[Any] ):
'''simple docstring'''
_snake_case : int = self.get_vision_text_model(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : List[str] = TFVisionTextDualEncoderModel(vision_model=lowerCamelCase_ , text_model=lowerCamelCase_ )
_snake_case : Optional[Any] = model(input_ids=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
_snake_case : int = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase_ )
_snake_case : List[Any] = TFVisionTextDualEncoderModel.from_pretrained(lowerCamelCase_ )
_snake_case : Optional[int] = model(input_ids=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
_snake_case : Optional[int] = after_output[0].numpy()
_snake_case : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase_ , 1e-5 )
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : int=None , **lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : Optional[int] = self.get_vision_text_model(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=lowerCamelCase_ , text_model=lowerCamelCase_ )
_snake_case : int = model(
input_ids=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , output_attentions=lowerCamelCase_ )
_snake_case : Tuple = output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase_ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case : str = to_atuple(vision_model.config.image_size )
_snake_case : Optional[int] = to_atuple(vision_model.config.patch_size )
_snake_case : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_snake_case : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_snake_case : Optional[int] = output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def __UpperCAmelCase ( self : str , a : np.ndarray , b : np.ndarray , tol : float ):
        '''simple docstring'''
        diff = np.abs((a - b) ).max()
        self.assertLessEqual(diff , tol , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**lowerCamelCase_ )
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase_ )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : Any = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase_ )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : int = self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase_ )
@slow
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.get_pretrained_model_and_inputs()
_snake_case : List[Any] = model_a(**lowerCamelCase_ )
_snake_case : Tuple = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase_ )
_snake_case : Any = TFVisionTextDualEncoderModel.from_pretrained(lowerCamelCase_ )
_snake_case : Optional[Any] = model_a(**lowerCamelCase_ )
_snake_case : List[Any] = after_outputs[0].numpy()
_snake_case : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase_ , 1e-5 )
@require_tf
class lowercase ( a_ , unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def __UpperCAmelCase ( self : int , vision_config : Any , text_config : Dict ):
        '''simple docstring'''
        vision_model = TFViTModel(vision_config , name='vision_model' )
        text_model = TFBertModel(text_config , name='text_model' )
        return vision_model, text_model
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
        vit_model_tester = TFViTModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class lowercase ( a_ , unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any=None , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
_snake_case : Dict = self.get_vision_text_model(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Tuple = TFVisionTextDualEncoderModel(vision_model=lowerCamelCase_ , text_model=lowerCamelCase_ )
_snake_case : str = model(
input_ids=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , output_attentions=lowerCamelCase_ )
_snake_case : Any = output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase_ ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_snake_case : int = to_atuple(vision_model.config.image_size )
_snake_case : int = to_atuple(vision_model.config.patch_size )
_snake_case : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_snake_case : Tuple = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_snake_case : Union[str, Any] = output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def __UpperCAmelCase ( self : List[Any] , vision_config : List[str] , text_config : str ):
        '''simple docstring'''
        vision_model = TFDeiTModel(vision_config , name='vision_model' )
        text_model = TFRobertaModel(text_config , name='text_model' )
        return vision_model, text_model
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
        vit_model_tester = TFDeiTModelTester(self )
        bert_model_tester = TFRobertaModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class lowercase ( a_ , unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def __UpperCAmelCase ( self : List[Any] , vision_config : int , text_config : Union[str, Any] ):
        '''simple docstring'''
        vision_model = TFCLIPVisionModel(vision_config , name='vision_model' )
        text_model = TFBertModel(text_config , name='text_model' )
        return vision_model, text_model
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
        clip_model_tester = TFCLIPVisionModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    @slow
    def __UpperCAmelCase ( self : Tuple ):
        '''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_pretrained(
            'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=True )
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        inputs = processor(
            text=['una foto di un gatto', 'una foto di un cane'] , images=image , padding=True , return_tensors='np' )
        outputs = model(**inputs )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.228_4727, 0.310_4122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , expected_logits , atol=1e-3 ) )
| 705 |
lowercase_ : Tuple = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ : str = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 652 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : int = logging.get_logger(__name__)
lowercase_ : Optional[int] = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class lowercase ( PretrainedConfig ):
    """simple docstring"""
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=26_77_35 , cutoffs=[2_00_00, 4_00_00, 20_00_00] , d_model=10_24 , d_embed=10_24 , n_head=16 , d_head=64 , d_inner=40_96 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=16_00 , clamp_len=10_00 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1e-5 , eos_token_id=0 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings( self ):
        '''simple docstring'''
        logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value ):
        '''simple docstring'''
        raise NotImplementedError(
            f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
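# Hedged usage sketch (not part of the original file; the file uses relative
# imports, so it only runs from within an installed transformers package):
# config = lowercase()          # all defaults, class name as in this file
# print(config.hidden_size)     # resolved to d_model via attribute_map -> 1024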
| 706 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_roc_bert'] = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 0 |
import math
def check_partition_perfect( positive_integer ):
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def solution( max_proportion = 1 / 1_23_45 ):
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate )
        integer += 1
if __name__ == "__main__":
print(F'''{solution() = }''')
| 707 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase_ : Optional[int] = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 0 |
import copy
import random
from transformers import CLIPTokenizer
class lowercase ( CLIPTokenizer ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        self.token_map = {}
    def try_adding_tokens( self , placeholder_token , *args , **kwargs ):
        '''simple docstring'''
        num_added_tokens = super().add_tokens(placeholder_token , *args , **kwargs )
        if num_added_tokens == 0:
            raise ValueError(
                f'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
                ' `placeholder_token` that is not already in the tokenizer.' )
    def add_placeholder_tokens( self , placeholder_token , *args , num_vec_per_token=1 , **kwargs ):
        '''simple docstring'''
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token , *args , **kwargs )
            output.append(placeholder_token )
        else:
            for i in range(num_vec_per_token ):
                ith_token = placeholder_token + f'''_{i}'''
                self.try_adding_tokens(ith_token , *args , **kwargs )
                output.append(ith_token )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f'''The tokenizer already has placeholder token {token} that can get confused with'''
                    f''' {placeholder_token}; keep placeholder tokens independent''' )
        self.token_map[placeholder_token] = output
    def replace_placeholder_tokens_in_text( self , text , vector_shuffle=False , prop_tokens_to_load=1.0 ):
        '''simple docstring'''
        if isinstance(text , list ):
            output = []
            for i in range(len(text ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=vector_shuffle ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens ) * prop_tokens_to_load )]
                if vector_shuffle:
                    tokens = copy.copy(tokens )
                    random.shuffle(tokens )
                text = text.replace(placeholder_token , ' '.join(tokens ) )
        return text
    def __call__( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        '''simple docstring'''
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
    def encode( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        '''simple docstring'''
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
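# Hedged usage sketch (not part of the original file; the checkpoint name is
# illustrative and fetching it needs network access):
# tokenizer = lowercase.from_pretrained('openai/clip-vit-base-patch32')
# tokenizer.add_placeholder_tokens('<my-concept>' , num_vec_per_token=4)
# ids = tokenizer.encode('a photo of <my-concept>')  # '<my-concept>' expands to 4 sub-tokens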
| 708 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def test_inspect_dataset( path , tmp_path ):
    inspect_dataset(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def test_inspect_metric( path , tmp_path ):
    inspect_metric(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
    'path, config_name, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def test_get_dataset_config_info( path , config_name , expected_splits ):
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def test_get_dataset_config_info_error( path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
    'path, expected' , [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ] , )
def test_get_dataset_config_names( path , expected ):
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config' , [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ] , )
def test_get_dataset_infos( path , expected_configs , expected_splits_in_first_config ):
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
    'path, expected_config, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def test_get_dataset_info( path , expected_config , expected_splits ):
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def test_get_dataset_split_names_error( path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 652 | 0 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : Optional[int] = logging.get_logger(__name__)
def load_orig_config_file( config_file_path ):
    print('Loading config file...' )
    def flatten_yaml_as_dict( d , parent_key="" , sep="." ):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )
    config = argparse.Namespace()
    with open(config_file_path , 'r' ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file , Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config , k , v )
        except yaml.YAMLError as exc:
            logger.error('Error while loading config file: {}. Error message: {}'.format(config_file_path , str(exc ) ) )
    return config
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Dict = MobileViTVaConfig()
_snake_case : List[str] = False
# dataset
if task_name.startswith('imagenet1k_' ):
_snake_case : int = 10_00
if int(task_name.strip().split('_' )[-1] ) == 3_84:
_snake_case : Optional[int] = 3_84
else:
_snake_case : Dict = 2_56
_snake_case : List[Any] = 'imagenet-1k-id2label.json'
elif task_name.startswith('imagenet21k_to_1k_' ):
_snake_case : int = 2_10_00
if int(task_name.strip().split('_' )[-1] ) == 3_84:
_snake_case : Optional[int] = 3_84
else:
_snake_case : Tuple = 2_56
_snake_case : int = 'imagenet-22k-id2label.json'
elif task_name.startswith('ade20k_' ):
_snake_case : int = 1_51
_snake_case : int = 5_12
_snake_case : Tuple = 'ade20k-id2label.json'
_snake_case : List[str] = True
elif task_name.startswith('voc_' ):
_snake_case : Union[str, Any] = 21
_snake_case : str = 5_12
_snake_case : int = 'pascal-voc-id2label.json'
_snake_case : Union[str, Any] = True
# orig_config
_snake_case : Tuple = load_orig_config_file(__lowerCAmelCase )
assert getattr(__lowerCAmelCase , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model"
_snake_case : int = getattr(__lowerCAmelCase , 'model.classification.mitv2.width_multiplier' , 1.0 )
assert (
getattr(__lowerCAmelCase , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_snake_case : str = getattr(__lowerCAmelCase , 'model.classification.activation.name' , 'swish' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_snake_case : Any = getattr(__lowerCAmelCase , 'model.segmentation.output_stride' , 16 )
if "_deeplabv3" in task_name:
_snake_case : Optional[int] = getattr(__lowerCAmelCase , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] )
_snake_case : Dict = getattr(__lowerCAmelCase , 'model.segmentation.deeplabv3.aspp_out_channels' , 5_12 )
_snake_case : Optional[int] = getattr(__lowerCAmelCase , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 )
# id2label
_snake_case : Any = 'huggingface/label-files'
_snake_case : Optional[int] = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
_snake_case : Dict = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
_snake_case : List[Any] = idalabel
_snake_case : Any = {v: k for k, v in idalabel.items()}
return config
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def A__( __lowerCAmelCase , __lowerCAmelCase=False ):
if base_model:
_snake_case : Optional[int] = ''
else:
_snake_case : List[Any] = 'mobilevitv2.'
_snake_case : List[Any] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_snake_case : List[Any] = k[8:]
else:
_snake_case : Optional[Any] = k
if ".block." in k:
_snake_case : List[str] = k_new.replace('.block.' , '.' )
if ".conv." in k:
_snake_case : Optional[int] = k_new.replace('.conv.' , '.convolution.' )
if ".norm." in k:
_snake_case : Any = k_new.replace('.norm.' , '.normalization.' )
if "conv_1." in k:
_snake_case : Dict = k_new.replace('conv_1.' , F'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if F'''layer_{i}.''' in k:
_snake_case : Any = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
_snake_case : List[str] = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
if ".red_1x1." in k:
_snake_case : Union[str, Any] = k_new.replace('.red_1x1.' , '.reduce_1x1.' )
for i in [3, 4, 5]:
if F'''layer_{i}.0.''' in k:
_snake_case : Union[str, Any] = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if F'''layer_{i}.1.local_rep.0.''' in k:
_snake_case : List[Any] = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if F'''layer_{i}.1.local_rep.1.''' in k:
_snake_case : Optional[int] = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
_snake_case : str = [0, 1]
elif i == 4:
_snake_case : str = [0, 1, 2, 3]
elif i == 5:
_snake_case : Tuple = [0, 1, 2]
for j in j_in:
if F'''layer_{i}.1.global_rep.{j}.''' in k:
_snake_case : Optional[Any] = k_new.replace(
F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
_snake_case : List[Any] = k_new.replace(
F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if F'''layer_{i}.1.conv_proj.''' in k:
_snake_case : Any = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
_snake_case : Union[str, Any] = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
if "pre_norm_attn.1." in k:
_snake_case : Dict = k_new.replace('pre_norm_attn.1.' , 'attention.' )
if "pre_norm_ffn.0." in k:
_snake_case : List[Any] = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
if "pre_norm_ffn.1." in k:
_snake_case : List[str] = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
if "pre_norm_ffn.3." in k:
_snake_case : Dict = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )
if "classifier.1." in k:
_snake_case : int = k_new.replace('classifier.1.' , 'classifier.' )
if "seg_head." in k:
_snake_case : Dict = k_new.replace('seg_head.' , 'segmentation_head.' )
if ".aspp_layer." in k:
_snake_case : str = k_new.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in k:
_snake_case : Optional[int] = k_new.replace('.aspp_pool.' , '.' )
rename_keys.append((k, k_new) )
return rename_keys
def remove_unused_keys( state_dict ):
    # remove unused keys (e.g. the auxiliary segmentation head)
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith('seg_head.aux_head.' ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k , None )
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[str] = get_mobilevitva_config(__lowerCAmelCase , __lowerCAmelCase )
# load original state_dict
_snake_case : Optional[int] = torch.load(__lowerCAmelCase , map_location='cpu' )
# load huggingface model
if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
_snake_case : Union[str, Any] = MobileViTVaForSemanticSegmentation(__lowerCAmelCase ).eval()
_snake_case : Tuple = False
else:
_snake_case : Any = MobileViTVaForImageClassification(__lowerCAmelCase ).eval()
_snake_case : Optional[Any] = False
# remove and rename some keys of load the original model
_snake_case : Any = checkpoint
remove_unused_keys(__lowerCAmelCase )
_snake_case : str = create_rename_keys(__lowerCAmelCase , base_model=__lowerCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# load modified state_dict
model.load_state_dict(__lowerCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
_snake_case : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_snake_case : Optional[Any] = image_processor(images=prepare_img() , return_tensors='pt' )
_snake_case : Tuple = model(**__lowerCAmelCase )
# verify classification model
if task_name.startswith('imagenet' ):
_snake_case : Union[str, Any] = outputs.logits
_snake_case : Any = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
_snake_case : int = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] )
assert torch.allclose(logits[0, :3] , __lowerCAmelCase , atol=1E-4 )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowercase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
lowercase_ : Optional[Any] = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
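# Hedged CLI sketch (not part of the original file; script name and paths are
# placeholders for a real checkpoint, config and output directory):
# python convert_mlcvnets_to_pytorch.py --task imagenet1k_256 \
#     --orig_checkpoint_path mobilevitv2-1.0.pt \
#     --orig_config_path mobilevitv2.yaml \
#     --pytorch_dump_folder_path ./mobilevitv2-1.0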
| 709 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
lowercase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase_ : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
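# Hedged programmatic usage (equivalent to the CLI above; the file names are
# placeholders for a real TensorFlow BERT checkpoint, config and output path):
# convert_tf_checkpoint_to_pytorch(
#     'bert_model.ckpt' , 'bert_config.json' , 'pytorch_model.bin'
# )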
| 652 | 0 |
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def column_reshape( input_array ):
    # Reshape a row Numpy array into a column Numpy array
    return input_array.reshape((input_array.size, 1) )
def covariance_within_classes( features , labels , classes ):
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def covariance_between_classes( features , labels , classes ):
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis( features , dimensions ):
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , centered_data )
        logging.info('Principal Component Analysis computed' )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=True )
        logging.error('Dataset empty' )
        raise AssertionError
def linear_discriminant_analysis( features , labels , classes , dimensions ):
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info('Linear Discriminant Analysis computed' )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=True )
        logging.error('Dataset empty' )
        raise AssertionError
def test_linear_discriminant_analysis():
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes' )
    assert error_info.type is AssertionError
def test_principal_component_analysis():
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
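# A minimal, hedged usage sketch (not part of the original file): project a
# tiny dataset of 3 features x 4 samples onto its first 2 principal components.
if __name__ == "__main__":
    example_features = np.array(
        [[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0], [1.0, 1.0, 2.0, 2.0]] )
    print(principal_component_analysis(example_features , 2 ).shape )  # (2, 4)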
| 710 |
import itertools
import math
def is_prime( number ):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution( nth = 1_00_01 ):
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 652 | 0 |
from math import factorial
def binomial_distribution( successes , trials , prob ):
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials' )
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers' )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError('the function is defined for non-negative integers' )
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 711 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = '''bert-base-cased'''
PEGASUS_XSUM = '''google/pegasus-xsum'''
ARTICLES = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
SUMMARIES = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
T5_TINY = '''patrickvonplaten/t5-tiny-random'''
BART_TINY = '''sshleifer/bart-tiny-random'''
MBART_TINY = '''sshleifer/tiny-mbart'''
MARIAN_TINY = '''sshleifer/tiny-marian-en-de'''
def _dump_articles( path , articles ):
    content = '\n'.join(articles )
    Path(path ).open('w' ).writelines(content )
def make_test_data_dir( tmp_dir ):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , f'''{split}.source''' ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , f'''{split}.target''' ) , SUMMARIES )
    return tmp_dir
class TestSeqaSeqDataset(TestCasePlus):
    """Tests for SeqaSeqDataset, LegacySeqaSeqDataset and their samplers."""

    @parameterized.expand([MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM])
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = 'ro_RO', 'de_DE'  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path='train',
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch['attention_mask'].shape == batch['input_ids'].shape
            # show that articles were trimmed.
            assert batch['input_ids'].shape[1] == max_src_len
            # show that targets are the same len
            assert batch['labels'].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch['decoder_input_ids'] = shift_tokens_right(batch['labels'], tokenizer.pad_token_id)
            assert batch['decoder_input_ids'][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch['decoder_input_ids'][0, -1].item() == tokenizer.eos_token_id
            assert batch['input_ids'][0, -2].item() == tokenizer.eos_token_id
            assert batch['input_ids'][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path='train',
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch['attention_mask'].shape == batch['input_ids'].shape
            # show that articles were trimmed.
            assert batch['input_ids'].shape[1] == max_len_source
            assert 20 >= batch['input_ids'].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch['labels'].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25')

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath('train.source').open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath('train.source').open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason='This test requires fairseq')
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch['input_ids'].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.prod(batch['input_ids'].shape)  # np.product is deprecated
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f'too many tokens in {len(failures)} batches')
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k='input_ids'):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k='labels')) < sum(count_pad_tokens(naive_dl, k='labels'))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv('USE_REAL_DATA', False):
            data_dir = 'examples/seq2seq/wmt_en_ro'
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath('train.len').exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = 'examples/seq2seq/test_data/wmt_en_ro'
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = SeqaSeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path='train',
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
    @parameterized.expand([MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM])
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path='train',
                max_source_length=4,
                max_target_length=8,
                src_lang='EN',
                tgt_lang='FR',
            )
            kwargs = train_dataset.dataset_kwargs
            assert 'src_lang' in kwargs and 'tgt_lang' in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path='train',
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert 'add_prefix_space' not in kwargs if tok_name != BART_TINY else 'add_prefix_space' in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
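# Minimal usage sketch outside pytest (our addition; assumes the tiny Marian
# checkpoint can be downloaded and the local utils module above is importable).
if __name__ == '__main__':
    import tempfile

    demo_tok = AutoTokenizer.from_pretrained(MARIAN_TINY)
    demo_dir = make_test_data_dir(tmp_dir=tempfile.mkdtemp())
    demo_ds = SeqaSeqDataset(demo_tok, data_dir=demo_dir, type_path='train', max_source_length=8, max_target_length=8)
    for demo_batch in DataLoader(demo_ds, batch_size=2, collate_fn=demo_ds.collate_fn):
        print(demo_batch['input_ids'].shape, demo_batch['labels'].shape)
        break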
| 652 | 0 |
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    """A simple FIFO queue (backed by a list) used for the level-order printout."""

    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print(self) -> None:
        print(self.data)
        print('**************')
        print(self.data[self.head : self.tail])
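# Quick illustration (helper added here, not part of the original module): the queue
# never discards elements, it only advances the head index.
def _demo_queue() -> None:
    q = MyQueue()
    for item in ('a', 'b', 'c'):
        q.push(item)
    assert q.pop() == 'a'
    assert q.count() == 2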
class MyNode:
    """A binary search tree node that caches its own height."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    # Rotate the left child up; used for left-left imbalances.
    print('right rotation node:', node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def left_rotation(node: MyNode) -> MyNode:
    # Rotate the right child up; used for right-right imbalances.
    print('left rotation node:', node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
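# Tiny demonstration (helper added here for illustration): a left-leaning chain
# 3 <- 2 <- 1 becomes balanced after a right rotation at the old root.
def _demo_right_rotation() -> None:
    chain = MyNode(3)
    chain.set_left(MyNode(2))
    chain.get_left().set_left(MyNode(1))
    new_root = right_rotation(chain)
    assert new_root.get_data() == 2
    assert new_root.get_left().get_data() == 1
    assert new_root.get_right().get_data() == 3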
def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
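# Worked illustration (helper added here for illustration): inserting 1, 2, 3 in
# ascending order creates a right-right imbalance at the root, which a single left
# rotation repairs, leaving 2 as the new root.
def _demo_insert_rebalance() -> None:
    root = None
    for value in (1, 2, 3):
        root = insert_node(root, value)
    assert root is not None and root.get_data() == 2
    assert root.get_left().get_data() == 1
    assert root.get_right().get_data() == 3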
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print('No such data')
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    h = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(h)
    return root
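# Deletion keeps the tree balanced as well (helper added here for illustration):
# removing the root promotes the left-most value of its right subtree.
def _demo_delete_root() -> None:
    root = None
    for value in (1, 2, 3):
        root = insert_node(root, value)
    root = del_node(root, 2)
    assert root is not None and root.get_data() == 3
    assert root.get_left().get_data() == 1 and root.get_right() is None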
class AVLtree:
    """An AVL tree: a self-balancing binary search tree."""

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print('insert:' + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print('delete:' + str(data))
        if self.root is None:
            print('Tree is empty!')
            return
        self.root = del_node(self.root, data)
    def __str__(self) -> str:  # a level-order traversal gives a more intuitive look at the tree
        output = ''
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = ' ' * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += '*'
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += '\n*************************************'
                        return output
                    output += '\n'
                    break
        output += '\n*************************************'
        return output
def _test() -> None:
    import doctest

    doctest.testmod()
if __name__ == "__main__":
_test()
    t = AVLtree()
    lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 712 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
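# Worked example as a self-check (helper added here for illustration): values
# (60, 100, 120) with weights (10, 20, 30) at capacity 50 give 240.0 -- the first
# two items are taken whole and 2/3 of the third fills the remaining 20 units.
def _demo_fractional_knapsack() -> None:
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert max_value == 240.0
    assert fractions == [1, 1, 2 / 3]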
if __name__ == "__main__":
import doctest
doctest.testmod()
| 652 | 0 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = 'bert-base-cased'
PEGASUS_XSUM = 'google/pegasus-xsum'
ARTICLES = [' Sam ate lunch today.', 'Sams lunch ingredients.']
SUMMARIES = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
MARIAN_TINY = 'sshleifer/tiny-marian-en-de'
def _dump_articles(path, articles) -> None:
    content = '\n'.join(articles)
    Path(path).open('w').writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f'{split}.source'), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f'{split}.target'), SUMMARIES)
    return tmp_dir
class TestSeqaSeqDataset(TestCasePlus):
    """Tests for SeqaSeqDataset, LegacySeqaSeqDataset and their samplers."""

    @parameterized.expand([MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM])
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = 'ro_RO', 'de_DE'  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path='train',
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch['attention_mask'].shape == batch['input_ids'].shape
            # show that articles were trimmed.
            assert batch['input_ids'].shape[1] == max_src_len
            # show that targets are the same len
            assert batch['labels'].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch['decoder_input_ids'] = shift_tokens_right(batch['labels'], tokenizer.pad_token_id)
            assert batch['decoder_input_ids'][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch['decoder_input_ids'][0, -1].item() == tokenizer.eos_token_id
            assert batch['input_ids'][0, -2].item() == tokenizer.eos_token_id
            assert batch['input_ids'][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path='train',
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch['attention_mask'].shape == batch['input_ids'].shape
            # show that articles were trimmed.
            assert batch['input_ids'].shape[1] == max_len_source
            assert 20 >= batch['input_ids'].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch['labels'].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25')

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath('train.source').open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath('train.source').open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason='This test requires fairseq')
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch['input_ids'].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.prod(batch['input_ids'].shape)  # np.product is deprecated
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f'too many tokens in {len(failures)} batches')
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k='input_ids'):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k='labels')) < sum(count_pad_tokens(naive_dl, k='labels'))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv('USE_REAL_DATA', False):
            data_dir = 'examples/seq2seq/wmt_en_ro'
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath('train.len').exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = 'examples/seq2seq/test_data/wmt_en_ro'
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = SeqaSeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path='train',
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
    @parameterized.expand([MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM])
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path='train',
                max_source_length=4,
                max_target_length=8,
                src_lang='EN',
                tgt_lang='FR',
            )
            kwargs = train_dataset.dataset_kwargs
            assert 'src_lang' in kwargs and 'tgt_lang' in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path='train',
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert 'add_prefix_space' not in kwargs if tok_name != BART_TINY else 'add_prefix_space' in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
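# Illustrative sketch (our addition; assumes the mbart-large-cc25 tokenizer can be
# downloaded and temp dirs are writable): pack_data_dir greedily concatenates
# adjacent examples so each packed line stays within 128 tokens, mirroring the
# assertions in test_pack_dataset above.
if __name__ == '__main__':
    import tempfile

    demo_tok = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25')
    demo_src = Path(make_test_data_dir(tmp_dir=tempfile.mkdtemp()))
    demo_out = Path(make_test_data_dir(tmp_dir=tempfile.mkdtemp()))
    pack_data_dir(demo_tok, demo_src, 128, demo_out)
    print(demo_out.joinpath('train.source').read_text())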
| 713 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_canine'] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
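# Usage note (illustrative; assumes transformers is installed): thanks to the lazy
# module registered above, torch-backed model classes are only imported on first
# attribute access, e.g.:
#
#     from transformers import CanineConfig, CanineTokenizer
#     config = CanineConfig()   # resolved lazily on first access
#     print(config.model_type)  # 'canine'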
| 652 | 0 |