code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__magic_name__ : Tuple = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
__magic_name__ : Dict = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
__magic_name__ : Tuple = r'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
def _SCREAMING_SNAKE_CASE ( self : str , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
UpperCamelCase = 0.0
for i, j in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
n_correct += 1.0 if math_equivalence.is_equiv(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 0.0
UpperCamelCase = n_correct / len(_SCREAMING_SNAKE_CASE )
return {
"accuracy": accuracy,
}
| 280 |
def lowercase__ ( _UpperCamelCase) -> list:
"""simple docstring"""
if bit_count < 0:
raise ValueError('The given input must be positive')
# get the generated string sequence
UpperCamelCase = gray_code_sequence_string(_UpperCamelCase)
#
# convert them to integers
for i in range(len(_UpperCamelCase)):
UpperCamelCase = int(sequence[i] , 2)
return sequence
def lowercase__ ( _UpperCamelCase) -> list:
"""simple docstring"""
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
UpperCamelCase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
UpperCamelCase = gray_code_sequence_string(bit_count - 1)
UpperCamelCase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2):
UpperCamelCase = '0' + smaller_sequence[i]
sequence.append(_UpperCamelCase)
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2)):
UpperCamelCase = '1' + smaller_sequence[i]
sequence.append(_UpperCamelCase)
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class snake_case__ ( snake_case_ ):
_snake_case : Tuple = """Speech2TextFeatureExtractor"""
_snake_case : Dict = """Speech2TextTokenizer"""
def __init__( self , lowerCamelCase , lowerCamelCase ):
super().__init__(lowerCamelCase , lowerCamelCase )
__a = self.feature_extractor
__a = False
def __call__( self , *lowerCamelCase , **lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase , **lowerCamelCase )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
__a = kwargs.pop("raw_speech" )
else:
__a = kwargs.pop("audio" , lowerCamelCase )
__a = kwargs.pop("sampling_rate" , lowerCamelCase )
__a = kwargs.pop("text" , lowerCamelCase )
if len(lowerCamelCase ) > 0:
__a = args[0]
__a = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
__a = self.feature_extractor(lowerCamelCase , *lowerCamelCase , sampling_rate=lowerCamelCase , **lowerCamelCase )
if text is not None:
__a = self.tokenizer(lowerCamelCase , **lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__a = encodings["input_ids"]
return inputs
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase )
@contextmanager
def a__ ( self ):
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
__a = True
__a = self.tokenizer
yield
__a = self.feature_extractor
__a = False
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class snake_case__ ( snake_case_ ):
_snake_case : str = """blenderbot-small"""
_snake_case : str = ["""past_key_values"""]
_snake_case : List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCamelCase=50265 , lowerCamelCase=512 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="gelu" , lowerCamelCase=512 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=2 , **lowerCamelCase , ):
__a = vocab_size
__a = max_position_embeddings
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = use_cache
__a = encoder_layers
__a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , forced_eos_token_id=lowerCamelCase , **lowerCamelCase , )
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a = {0: "batch"}
__a = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__a = {0: "batch", 1: "decoder_sequence"}
__a = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
else:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = super().outputs
else:
__a = super(lowerCamelCase , self ).outputs
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Generate decoder inputs
__a = seq_length if not self.use_past else 1
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__a = dict(**lowerCamelCase , **lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
__a = common_inputs["decoder_input_ids"].shape[1]
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = decoder_seq_length + 3
__a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__a = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase )] , dim=1 )
__a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__a , __a = self.num_layers
__a = min(lowerCamelCase , lowerCamelCase )
__a = max(lowerCamelCase , lowerCamelCase ) - min_num_layers
__a = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
) )
# TODO: test this.
__a = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowerCamelCase , lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__a = seqlen + 2
__a , __a = self.num_layers
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = common_inputs["attention_mask"].dtype
__a = torch.cat(
[common_inputs["attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 )
__a = [
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase )
]
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a = tokenizer.num_special_tokens_to_add(lowerCamelCase )
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
__a = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__a = dict(tokenizer(lowerCamelCase , return_tensors=lowerCamelCase ) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
elif self.task == "causal-lm":
__a = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
else:
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if self.task in ["default", "seq2seq-lm"]:
__a = super()._flatten_past_key_values_(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__a = super(lowerCamelCase , self )._flatten_past_key_values_(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
| 67 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : List[str] = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | """simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> str:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("Undefined for non-integers" )
elif precision < 1:
raise ValueError("Undefined for non-natural numbers" )
SCREAMING_SNAKE_CASE__ = precision
SCREAMING_SNAKE_CASE__ = ceil(precision / 14 )
SCREAMING_SNAKE_CASE__ = 426_880 * Decimal(10_005 ).sqrt()
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 13_591_409
SCREAMING_SNAKE_CASE__ = Decimal(__UpperCAmelCase )
for k in range(1 , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = factorial(6 * k ) // (factorial(3 * k ) * factorial(__UpperCAmelCase ) ** 3)
linear_term += 545_140_134
exponential_term *= -262_537_412_640_768_000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
_A = 5_0
print(F'The first {n} digits of pi is: {pi(n)}')
| 159 | 0 |
from __future__ import annotations
def __lowercase( UpperCAmelCase__ = 4 ):
"""simple docstring"""
lowerCamelCase = abs(UpperCAmelCase__ ) or 4
return [[1 + x + y * row_size for x in range(UpperCAmelCase__ )] for y in range(UpperCAmelCase__ )]
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
return reverse_row(transpose(UpperCAmelCase__ ) )
# OR.. transpose(reverse_column(matrix))
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
return reverse_row(reverse_column(UpperCAmelCase__ ) )
# OR.. reverse_column(reverse_row(matrix))
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
return reverse_column(transpose(UpperCAmelCase__ ) )
# OR.. transpose(reverse_row(matrix))
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = [list(UpperCAmelCase__ ) for x in zip(*UpperCAmelCase__ )]
return matrix
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = matrix[::-1]
return matrix
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = [x[::-1] for x in matrix]
return matrix
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
for i in matrix:
print(*UpperCAmelCase__ )
if __name__ == "__main__":
a_ : Union[str, Any] = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
a_ : str = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
a_ : Optional[Any] = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix)) | 707 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def _a (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
def _a (self ):
'''simple docstring'''
lowerCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowerCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowerCamelCase = "xvjiarui/stable-diffusion-2-inpainting"
lowerCamelCase , lowerCamelCase = FlaxStableDiffusionInpaintPipeline.from_pretrained(__a , safety_checker=__a )
lowerCamelCase = "Face of a yellow cat, high resolution, sitting on a park bench"
lowerCamelCase = jax.random.PRNGKey(0 )
lowerCamelCase = 50
lowerCamelCase = jax.device_count()
lowerCamelCase = num_samples * [prompt]
lowerCamelCase = num_samples * [init_image]
lowerCamelCase = num_samples * [mask_image]
lowerCamelCase , lowerCamelCase , lowerCamelCase = pipeline.prepare_inputs(__a , __a , __a )
# shard inputs and rng
lowerCamelCase = replicate(__a )
lowerCamelCase = jax.random.split(__a , jax.device_count() )
lowerCamelCase = shard(__a )
lowerCamelCase = shard(__a )
lowerCamelCase = shard(__a )
lowerCamelCase = pipeline(
__a , __a , __a , __a , __a , __a , jit=__a )
lowerCamelCase = output.images.reshape(__a , 5_12 , 5_12 , 3 )
lowerCamelCase = images[0, 2_53:2_56, 2_53:2_56, -1]
lowerCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase = jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 | 484 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
_UpperCamelCase : Union[str, Any] = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def __UpperCAmelCase ( A : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
UpperCAmelCase_ : List[Any] = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
UpperCAmelCase_ : Optional[int] = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
UpperCAmelCase_ : Optional[Any] = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
| 541 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ):
@register_to_config
def __init__( self : List[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : float , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : str , snake_case__ : bool = False , ):
'''simple docstring'''
super().__init__()
UpperCAmelCase__ : Optional[int] = nn.Embedding(snake_case__ , snake_case__ )
UpperCAmelCase__ : List[Any] = nn.Embedding(snake_case__ , snake_case__ )
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[int] = nn.Dropout(p=snake_case__ )
UpperCAmelCase__ : Optional[int] = TaConfig(
vocab_size=snake_case__ , d_model=snake_case__ , num_heads=snake_case__ , d_kv=snake_case__ , d_ff=snake_case__ , dropout_rate=snake_case__ , feed_forward_proj=snake_case__ , is_decoder=snake_case__ , is_encoder_decoder=snake_case__ , )
UpperCAmelCase__ : Tuple = nn.ModuleList()
for lyr_num in range(snake_case__ ):
UpperCAmelCase__ : Tuple = TaBlock(snake_case__ )
self.encoders.append(snake_case__ )
UpperCAmelCase__ : str = TaLayerNorm(snake_case__ )
UpperCAmelCase__ : Tuple = nn.Dropout(p=snake_case__ )
def __a ( self : int , snake_case__ : List[str] , snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.token_embedder(snake_case__ )
UpperCAmelCase__ : Optional[int] = encoder_input_tokens.shape[1]
UpperCAmelCase__ : List[Any] = torch.arange(snake_case__ , device=encoder_input_tokens.device )
x += self.position_encoding(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = self.dropout_pre(snake_case__ )
# inverted the attention mask
UpperCAmelCase__ : List[Any] = encoder_input_tokens.size()
UpperCAmelCase__ : Tuple = self.get_extended_attention_mask(snake_case__ , snake_case__ )
for lyr in self.encoders:
UpperCAmelCase__ : Any = lyr(snake_case__ , snake_case__ )[0]
UpperCAmelCase__ : Any = self.layer_norm(snake_case__ )
return self.dropout_post(snake_case__ ), encoder_inputs_mask
| 438 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class A__ ( __magic_name__ ):
lowercase = 'gpt_neox'
def __init__( self : str , a : List[str]=50_432 , a : Dict=6_144 , a : int=44 , a : Dict=64 , a : Dict=24_576 , a : List[str]="gelu" , a : Dict=0.2_5 , a : int=10_000 , a : List[Any]=0.0 , a : Tuple=0.0 , a : str=0.1 , a : Optional[int]=2_048 , a : Any=0.0_2 , a : Any=1E-5 , a : Optional[Any]=True , a : List[str]=0 , a : Any=2 , a : int=False , a : Dict=True , a : Optional[Any]=None , **a : Tuple , ):
'''simple docstring'''
super().__init__(bos_token_id=a , eos_token_id=a , **a )
lowerCAmelCase__ : Optional[int] = vocab_size
lowerCAmelCase__ : str = max_position_embeddings
lowerCAmelCase__ : Any = hidden_size
lowerCAmelCase__ : int = num_hidden_layers
lowerCAmelCase__ : List[Any] = num_attention_heads
lowerCAmelCase__ : Tuple = intermediate_size
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : List[str] = rotary_pct
lowerCAmelCase__ : List[Any] = rotary_emb_base
lowerCAmelCase__ : Union[str, Any] = attention_dropout
lowerCAmelCase__ : Union[str, Any] = hidden_dropout
lowerCAmelCase__ : str = classifier_dropout
lowerCAmelCase__ : Tuple = initializer_range
lowerCAmelCase__ : List[str] = layer_norm_eps
lowerCAmelCase__ : Tuple = use_cache
lowerCAmelCase__ : Any = tie_word_embeddings
lowerCAmelCase__ : Union[str, Any] = use_parallel_residual
lowerCAmelCase__ : int = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'The hidden size is not divisble by the number of attention heads! Make sure to update them!' )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , a ) or len(self.rope_scaling ) != 2:
raise ValueError(
'`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
f'''got {self.rope_scaling}''' )
lowerCAmelCase__ : Any = self.rope_scaling.get('type' , a )
lowerCAmelCase__ : List[Any] = self.rope_scaling.get('factor' , a )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(a , a ) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' ) | 69 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class A__ ( __magic_name__ ):
def __init__( self : Union[str, Any] , a : str="" , a : str="train" ):
'''simple docstring'''
assert os.path.isdir(a )
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : Dict = os.listdir(a )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
lowerCAmelCase__ : Union[str, Any] = os.path.join(a , a )
if not os.path.isfile(a ):
continue
self.documents.append(a )
def __len__( self : Any ):
'''simple docstring'''
return len(self.documents )
def __getitem__( self : Dict , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.documents[idx]
lowerCAmelCase__ : Union[str, Any] = document_path.split('/' )[-1]
with open(a , encoding='utf-8' ) as source:
lowerCAmelCase__ : List[Any] = source.read()
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = process_story(a )
return document_name, story_lines, summary_lines
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
lowerCAmelCase__ : Optional[int] = list(filter(lambda SCREAMING_SNAKE_CASE_ : len(SCREAMING_SNAKE_CASE_ ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) )
# for some unknown reason some lines miss a period, add it
lowerCAmelCase__ : List[Any] = [_add_missing_period(SCREAMING_SNAKE_CASE_ ) for line in nonempty_lines]
# gather article lines
lowerCAmelCase__ : int = []
lowerCAmelCase__ : Any = deque(SCREAMING_SNAKE_CASE_ )
while True:
try:
lowerCAmelCase__ : int = lines.popleft()
if element.startswith('@highlight' ):
break
story_lines.append(SCREAMING_SNAKE_CASE_ )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
lowerCAmelCase__ : Tuple = list(filter(lambda SCREAMING_SNAKE_CASE_ : not t.startswith('@highlight' ) , SCREAMING_SNAKE_CASE_ ) )
return story_lines, summary_lines
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Any:
lowerCAmelCase__ : int = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')']
if line.startswith('@highlight' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
if len(SCREAMING_SNAKE_CASE_ ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(SCREAMING_SNAKE_CASE_ )) )
return sequence
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
lowerCAmelCase__ : str = torch.ones_like(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : int = sequence == pad_token_id
lowerCAmelCase__ : Optional[int] = 0
return mask
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
lowerCAmelCase__ : Any = [tokenizer.encode(SCREAMING_SNAKE_CASE_ ) for line in story_lines]
lowerCAmelCase__ : str = [token for sentence in story_lines_token_ids for token in sentence]
lowerCAmelCase__ : Dict = [tokenizer.encode(SCREAMING_SNAKE_CASE_ ) for line in summary_lines]
lowerCAmelCase__ : str = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
lowerCAmelCase__ : Optional[int] = []
for sequence in batch:
lowerCAmelCase__ : Union[str, Any] = -1
lowerCAmelCase__ : int = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(SCREAMING_SNAKE_CASE_ )
return torch.tensor(SCREAMING_SNAKE_CASE_ ) | 69 | 1 |
'''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _lowerCAmelCase :
def __init__(self , lowercase ):
if isinstance(A_ , A_ ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
A_ : Optional[Any] = deepcopy(A_ )
elif os.path.exists(A_ ):
with io.open(A_ , """r""" , encoding="""utf-8""" ) as f:
A_ : str = json.load(A_ )
else:
try:
A_ : Union[str, Any] = baseaa.urlsafe_baadecode(A_ ).decode("""utf-8""" )
A_ : List[str] = json.loads(A_ )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
F'Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}' )
A_ : Optional[Any] = config
self.set_stage_and_offload()
def _a (self ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
A_ : str = self.get_value("""zero_optimization.stage""" , -1 )
# offload
A_ : str = False
if self.is_zeroa() or self.is_zeroa():
A_ : Union[str, Any] = set(["""cpu""", """nvme"""] )
A_ : str = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
A_ : Any = True
def _a (self , lowercase ):
A_ : int = self.config
# find the config node of interest if it exists
A_ : int = ds_key_long.split(""".""" )
A_ : Optional[int] = nodes.pop()
for node in nodes:
A_ : List[str] = config.get(A_ )
if config is None:
return None, ds_key
return config, ds_key
def _a (self , lowercase , lowercase=None ):
A_, A_ : Optional[Any] = self.find_config_node(A_ )
if config is None:
return default
return config.get(A_ , A_ )
def _a (self , lowercase , lowercase=False ):
A_ : Any = self.config
# find the config node of interest if it exists
A_ : Optional[Any] = ds_key_long.split(""".""" )
for node in nodes:
A_ : Dict = config
A_ : str = config.get(A_ )
if config is None:
if must_exist:
raise ValueError(F'Can\'t find {ds_key_long} entry in the config: {self.config}' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(A_ )
def _a (self , lowercase ):
A_ : int = self.get_value(A_ )
return False if value is None else bool(A_ )
def _a (self , lowercase ):
A_ : Optional[Any] = self.get_value(A_ )
return False if value is None else not bool(A_ )
def _a (self ):
return self._stage == 2
def _a (self ):
return self._stage == 3
def _a (self ):
return self._offload
class _lowerCAmelCase :
def __init__(self , lowercase ):
A_ : Any = engine
def _a (self , lowercase , **lowercase ):
# runs backpropagation and handles mixed precision
self.engine.backward(A_ , **A_ )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class _lowerCAmelCase ( lowerCAmelCase_ ):
def __init__(self , lowercase ):
super().__init__(A_ , device_placement=A_ , scaler=A_ )
A_ : Dict = hasattr(self.optimizer , """overflow""" )
def _a (self , lowercase=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def _a (self ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def _a (self ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class _lowerCAmelCase ( lowerCAmelCase_ ):
def __init__(self , lowercase , lowercase ):
super().__init__(A_ , A_ )
def _a (self ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=0.0_01 , lowercase=0 , **lowercase ):
A_ : Optional[Any] = params
A_ : Union[str, Any] = lr
A_ : Dict = weight_decay
A_ : str = kwargs
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=None , lowercase=0 , **lowercase ):
A_ : Tuple = optimizer
A_ : str = total_num_steps
A_ : Any = warmup_num_steps
A_ : str = kwargs | 667 |
'''simple docstring'''
def lowercase__( _UpperCamelCase : str )-> str:
"""simple docstring"""
return " ".join(
"".join(word[::-1] ) if len(_UpperCamelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 138 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def __lowerCAmelCase ( __lowerCAmelCase : list[Any] ) -> None:
create_state_space_tree(__lowerCAmelCase , [] , 0 )
def __lowerCAmelCase ( __lowerCAmelCase : list[Any] , __lowerCAmelCase : list[Any] , __lowerCAmelCase : int ) -> None:
if index == len(__lowerCAmelCase ):
print(__lowerCAmelCase )
return
create_state_space_tree(__lowerCAmelCase , __lowerCAmelCase , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(__lowerCAmelCase , __lowerCAmelCase , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
| 711 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(SCREAMING_SNAKE_CASE__ ) , """Tatoeba directory does not exist.""" )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = tempfile.mkdtemp()
return TatoebaConverter(save_dir=lowerCAmelCase__ )
@slow
def lowercase_ (self ):
'''simple docstring'''
self.resolver.convert_models(["heb-eng"] )
@slow
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Any = self.resolver.write_model_card("opus-mt-he-en" , dry_run=lowerCAmelCase__ )
assert mmeta["long_pair"] == "heb-eng"
| 239 | 0 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ) -> Optional[Any]:
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ : int =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ : str ={'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] =TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] ) -> str:
SCREAMING_SNAKE_CASE_ : Any =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ : Any ={'''text''': '''string'''}
SCREAMING_SNAKE_CASE_ : Dict =features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Optional[Any] =(
Features({feature: Value(UpperCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] =TextDatasetReader(UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] ) -> int:
SCREAMING_SNAKE_CASE_ : List[Any] =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ : List[Any] ={'''text''': '''string'''}
SCREAMING_SNAKE_CASE_ : Tuple =TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , split=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] ) -> Optional[Any]:
if issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] =text_path
elif issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] =[text_path]
SCREAMING_SNAKE_CASE_ : Optional[Any] =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ : Optional[Any] ={'''text''': '''string'''}
SCREAMING_SNAKE_CASE_ : Dict =TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any]=("train",) ) -> int:
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
for split in splits:
SCREAMING_SNAKE_CASE_ : List[Any] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE_ : Tuple =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ : Dict ={'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : Dict =TextDatasetReader({'''train''': text_path} , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ ).read()
_check_text_datasetdict(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE_ : List[str] =tmp_path / '''cache'''
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
SCREAMING_SNAKE_CASE_ : Optional[int] ={'''text''': '''string'''}
SCREAMING_SNAKE_CASE_ : Any =features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Optional[int] =(
Features({feature: Value(UpperCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Tuple =TextDatasetReader({'''train''': text_path} , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_datasetdict(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict ) -> Optional[Any]:
if split:
SCREAMING_SNAKE_CASE_ : List[str] ={split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] ='''train'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] ={'''train''': text_path, '''test''': text_path}
SCREAMING_SNAKE_CASE_ : Union[str, Any] =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ : List[str] ={'''text''': '''string'''}
SCREAMING_SNAKE_CASE_ : Union[str, Any] =TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_datasetdict(UpperCAmelCase_ , UpperCAmelCase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 443 |
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : int = 1_0_0_0 ) -> int:
SCREAMING_SNAKE_CASE_ : int =2**power
SCREAMING_SNAKE_CASE_ : Tuple =0
while n:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] =r + n % 1_0, n // 1_0
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 443 | 1 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__(self , lowerCAmelCase_ , lowerCAmelCase_=2 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=2 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=36 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=512 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=6 , lowerCAmelCase_=6 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , lowerCAmelCase_=1000 , ):
A_ : Optional[int] = parent
A_ : Tuple = batch_size
A_ : Optional[Any] = num_channels
A_ : Any = image_size
A_ : Any = patch_size
A_ : int = text_seq_length
A_ : int = is_training
A_ : Any = use_input_mask
A_ : Tuple = use_token_type_ids
A_ : List[str] = use_labels
A_ : Tuple = vocab_size
A_ : Optional[Any] = hidden_size
A_ : List[Any] = num_hidden_layers
A_ : Any = num_attention_heads
A_ : Optional[int] = intermediate_size
A_ : List[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : List[Any] = max_position_embeddings
A_ : List[str] = type_vocab_size
A_ : List[Any] = type_sequence_label_size
A_ : Optional[Any] = initializer_range
A_ : Tuple = coordinate_size
A_ : Tuple = shape_size
A_ : Tuple = num_labels
A_ : Any = num_choices
A_ : Tuple = scope
A_ : Any = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
A_ : Optional[int] = text_seq_length
A_ : Any = (image_size // patch_size) ** 2 + 1
A_ : Optional[Any] = self.text_seq_length + self.image_seq_length
def lowerCamelCase(self ):
A_ : int = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
A_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A_ : Union[str, Any] = bbox[i, j, 3]
A_ : Optional[int] = bbox[i, j, 1]
A_ : Any = t
if bbox[i, j, 2] < bbox[i, j, 0]:
A_ : str = bbox[i, j, 2]
A_ : List[Any] = bbox[i, j, 0]
A_ : Union[str, Any] = t
A_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Dict = None
if self.use_input_mask:
A_ : Optional[int] = random_attention_mask([self.batch_size, self.text_seq_length] )
A_ : Dict = None
if self.use_token_type_ids:
A_ : str = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
A_ : Dict = None
A_ : Tuple = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
A_ : List[str] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
A_ : Dict = LayoutLMvaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
# text + image
A_ : Optional[int] = model(lowerCAmelCase_ , pixel_values=lowerCAmelCase_ )
A_ : Optional[Any] = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
A_ : List[str] = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
A_ : Tuple = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
A_ : str = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
A_ : List[Any] = model(pixel_values=lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
A_ : List[str] = self.num_labels
A_ : int = LayoutLMvaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A_ : Any = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
A_ : List[Any] = self.num_labels
A_ : Union[str, Any] = LayoutLMvaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A_ : List[Any] = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
A_ : str = LayoutLMvaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A_ : Optional[Any] = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase(self ):
A_ : Optional[int] = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) : Optional[Any] = config_and_inputs
A_ : str = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
_A : Dict = False
_A : Union[str, Any] = False
_A : Any = False
_A : Optional[Any] = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_A : Optional[int] = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def lowerCamelCase(self ):
A_ : Optional[Any] = LayoutLMvaModelTester(self )
A_ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ):
A_ : Any = copy.deepcopy(lowerCAmelCase_ )
if model_class in get_values(lowerCAmelCase_ ):
A_ : List[str] = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCAmelCase_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCAmelCase_ ):
A_ : int = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
elif model_class in get_values(lowerCAmelCase_ ):
A_ : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
A_ : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
elif model_class in [
*get_values(lowerCAmelCase_ ),
]:
A_ : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
elif model_class in [
*get_values(lowerCAmelCase_ ),
]:
A_ : Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCAmelCase_ , )
return inputs_dict
def lowerCamelCase(self ):
self.config_tester.run_common_tests()
def lowerCamelCase(self ):
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowerCamelCase(self ):
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : Tuple = type
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowerCamelCase(self ):
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ )
def lowerCamelCase(self ):
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ )
def lowerCamelCase(self ):
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ )
@slow
def lowerCamelCase(self ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : str = LayoutLMvaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def __UpperCamelCase ( ):
A_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase(self ):
return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase_ ) if is_vision_available() else None
@slow
def lowerCamelCase(self ):
A_ : Optional[int] = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(lowerCAmelCase_ )
A_ : List[Any] = self.default_image_processor
A_ : str = prepare_img()
A_ : Union[str, Any] = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" ).pixel_values.to(lowerCAmelCase_ )
A_ : Optional[int] = torch.tensor([[1, 2]] )
A_ : Dict = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
A_ : Optional[Any] = model(
input_ids=input_ids.to(lowerCAmelCase_ ) , bbox=bbox.to(lowerCAmelCase_ ) , pixel_values=pixel_values.to(lowerCAmelCase_ ) , )
# verify the logits
A_ : Optional[int] = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase_ )
A_ : Union[str, Any] = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
| 480 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
_lowerCAmelCase = logging.getLogger(__name__)
def __UpperCamelCase ( snake_case__ ):
A_ : List[str] = git.Repo(search_parent_directories=snake_case__ )
A_ : List[str] = {
"""repo_id""": str(snake_case__ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
}
with open(os.path.join(snake_case__ , """git_log.json""" ) , """w""" ) as f:
json.dump(snake_case__ , snake_case__ , indent=4 )
def __UpperCamelCase ( snake_case__ ):
if params.n_gpu <= 0:
A_ : Dict = 0
A_ : str = -1
A_ : int = True
A_ : Union[str, Any] = False
return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
A_ : str = int(os.environ["""WORLD_SIZE"""] )
A_ : int = int(os.environ["""N_GPU_NODE"""] )
A_ : int = int(os.environ["""RANK"""] )
# number of nodes / node ID
A_ : Optional[int] = params.world_size // params.n_gpu_per_node
A_ : Optional[int] = params.global_rank // params.n_gpu_per_node
A_ : Tuple = True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
assert params.local_rank == -1
A_ : Dict = 1
A_ : Tuple = 0
A_ : Dict = 0
A_ : Optional[int] = 0
A_ : List[str] = 1
A_ : List[Any] = 1
A_ : List[Any] = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
A_ : Optional[int] = params.node_id == 0 and params.local_rank == 0
A_ : Optional[Any] = params.n_nodes > 1
# summary
A_ : str = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def __UpperCamelCase ( snake_case__ ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 480 | 1 |
def UpperCamelCase_ ( ) -> List[Any]:
a__ : Optional[int] = []
a__ : Dict = 1
while len(__a ) < 1e6:
constant.append(str(__a ) )
i += 1
a__ : Dict = "".join(__a )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9_999] )
* int(constant[99_999] )
* int(constant[999_999] )
)
if __name__ == "__main__":
print(solution())
| 37 | """simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
a__ = []
for part_id in partition_order:
a__ = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(UpperCAmelCase__ ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCamelCase ( ) -> int:
'''simple docstring'''
a__ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
a__ = spark.range(1_00 ).repartition(1 )
a__ = Spark(UpperCAmelCase__ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCamelCase ( ) -> Tuple:
'''simple docstring'''
a__ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
a__ = spark.range(10 ).repartition(2 )
a__ = [1, 0]
a__ = _generate_iterable_examples(UpperCAmelCase__,UpperCAmelCase__ ) # Reverse the partitions.
a__ = _get_expected_row_ids_and_row_dicts_for_partition_order(UpperCAmelCase__,UpperCAmelCase__ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
a__ , a__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCamelCase ( ) -> Optional[Any]:
'''simple docstring'''
a__ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
a__ = spark.range(10 ).repartition(1 )
a__ = SparkExamplesIterable(UpperCAmelCase__ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(UpperCAmelCase__ ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCamelCase ( ) -> Union[str, Any]:
'''simple docstring'''
a__ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
a__ = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('numpy.random.Generator' ) as generator_mock:
a__ = lambda UpperCAmelCase__ : x.reverse()
a__ = _get_expected_row_ids_and_row_dicts_for_partition_order(UpperCAmelCase__,[2, 1, 0] )
a__ = SparkExamplesIterable(UpperCAmelCase__ ).shuffle_data_sources(UpperCAmelCase__ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(UpperCAmelCase__ ):
a__ , a__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCamelCase ( ) -> Optional[int]:
'''simple docstring'''
a__ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
a__ = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
a__ = SparkExamplesIterable(UpperCAmelCase__ ).shard_data_sources(worker_id=0,num_workers=2 )
assert shard_it_a.n_shards == 2
a__ = _get_expected_row_ids_and_row_dicts_for_partition_order(UpperCAmelCase__,[0, 2] )
for i, (row_id, row_dict) in enumerate(UpperCAmelCase__ ):
a__ , a__ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
a__ = SparkExamplesIterable(UpperCAmelCase__ ).shard_data_sources(worker_id=1,num_workers=2 )
assert shard_it_a.n_shards == 2
a__ = _get_expected_row_ids_and_row_dicts_for_partition_order(UpperCAmelCase__,[1, 3] )
for i, (row_id, row_dict) in enumerate(UpperCAmelCase__ ):
a__ , a__ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCamelCase ( ) -> Optional[int]:
'''simple docstring'''
a__ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
a__ = spark.range(1_00 ).repartition(1 )
a__ = Spark(UpperCAmelCase__ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 232 | 0 |
from importlib import import_module
from .logging import get_logger
lowerCamelCase__ : Optional[Any] = get_logger(__name__)
class _snake_case :
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None):
'''simple docstring'''
lowercase__ : int = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__"""):
setattr(self , SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_))
lowercase__ : str = module._original_module if isinstance(SCREAMING_SNAKE_CASE_ , _PatchedModuleObj) else module
class _snake_case :
__lowerCAmelCase : Optional[int] = []
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None):
'''simple docstring'''
lowercase__ : Union[str, Any] = obj
lowercase__ : str = target
lowercase__ : Dict = new
lowercase__ : Optional[int] = target.split(""".""")[0]
lowercase__ : Dict = {}
lowercase__ : Dict = attrs or []
def __enter__( self):
'''simple docstring'''
lowercase__ : Optional[Any] = self.target.split(""".""")
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(SCREAMING_SNAKE_CASE_)):
try:
lowercase__ : List[Any] = import_module(""".""".join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
lowercase__ : Dict = getattr(self.obj , SCREAMING_SNAKE_CASE_)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(SCREAMING_SNAKE_CASE_ , _PatchedModuleObj) and obj_attr._original_module is submodule)
):
lowercase__ : Optional[Any] = obj_attr
# patch at top level
setattr(self.obj , SCREAMING_SNAKE_CASE_ , _PatchedModuleObj(SCREAMING_SNAKE_CASE_ , attrs=self.attrs))
lowercase__ : Optional[int] = getattr(self.obj , SCREAMING_SNAKE_CASE_)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , _PatchedModuleObj(getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) , attrs=self.attrs))
lowercase__ : str = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# finally set the target attribute
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
lowercase__ : Union[str, Any] = getattr(import_module(""".""".join(SCREAMING_SNAKE_CASE_)) , SCREAMING_SNAKE_CASE_)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , SCREAMING_SNAKE_CASE_) is attr_value:
lowercase__ : str = getattr(self.obj , SCREAMING_SNAKE_CASE_)
setattr(self.obj , SCREAMING_SNAKE_CASE_ , self.new)
elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
lowercase__ : Optional[int] = globals()["""__builtins__"""][target_attr]
setattr(self.obj , SCREAMING_SNAKE_CASE_ , self.new)
else:
raise RuntimeError(f'Tried to patch attribute {target_attr} instead of a submodule.')
def __exit__( self , *SCREAMING_SNAKE_CASE_):
'''simple docstring'''
for attr in list(self.original):
setattr(self.obj , SCREAMING_SNAKE_CASE_ , self.original.pop(SCREAMING_SNAKE_CASE_))
def lowercase__ ( self):
'''simple docstring'''
self.__enter__()
self._active_patches.append(self)
def lowercase__ ( self):
'''simple docstring'''
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
| 717 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowerCamelCase__ : Dict = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
lowerCamelCase__ : Tuple = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
lowerCamelCase__ : Any = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
return float((preds == labels).mean() )
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_="binary" ) -> int:
'''simple docstring'''
lowercase__ : List[str] = simple_accuracy(lowercase_ , lowercase_ )
lowercase__ : Optional[Any] = float(fa_score(y_true=lowercase_ , y_pred=lowercase_ , average=lowercase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def UpperCamelCase ( lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
lowercase__ : List[Any] = {}
for id_pred, label in zip(lowercase_ , lowercase_ ):
lowercase__ : Optional[Any] = F'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
lowercase__ : Optional[Any] = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
lowercase__ : Optional[int] = [(pred, label)]
lowercase__ , lowercase__ : List[str] = [], []
for question, preds_labels in question_map.items():
lowercase__ , lowercase__ : List[str] = zip(*lowercase_ )
lowercase__ : Optional[Any] = fa_score(y_true=lowercase_ , y_pred=lowercase_ , average="""macro""" )
fas.append(lowercase_ )
lowercase__ : List[Any] = int(sum(pred == label for pred, label in preds_labels ) == len(lowercase_ ) )
ems.append(lowercase_ )
lowercase__ : str = float(sum(lowercase_ ) / len(lowercase_ ) )
lowercase__ : str = sum(lowercase_ ) / len(lowercase_ )
lowercase__ : List[str] = float(fa_score(y_true=lowercase_ , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def lowercase__ ( self):
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types()) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def lowercase__ ( self):
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64"""),
"query": datasets.Value("""int64"""),
},
"prediction_text": datasets.Value("""string"""),
},
"references": {
"idx": {
"passage": datasets.Value("""int64"""),
"query": datasets.Value("""int64"""),
},
"answers": datasets.Sequence(datasets.Value("""string""")),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64"""),
"paragraph": datasets.Value("""int64"""),
"question": datasets.Value("""int64"""),
},
"prediction": datasets.Value("""int64"""),
},
"references": datasets.Value("""int64"""),
}
else:
return {
"predictions": datasets.Value("""int64"""),
"references": datasets.Value("""int64"""),
}
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)}
elif self.config_name == "cb":
return acc_and_fa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , fa_avg="""macro""")
elif self.config_name == "record":
lowercase__ : Optional[Any] = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
lowercase__ : str = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)[0]
elif self.config_name == "multirc":
return evaluate_multirc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""")
| 495 | 0 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ ) -> bool:
"""simple docstring"""
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 168 | '''simple docstring'''
import operator
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ = False , lowerCamelCase__ = None ) -> list:
"""simple docstring"""
__UpperCAmelCase : Tuple = operator.lt if reverse else operator.gt
__UpperCAmelCase : Any = solution or []
if not arr:
return solution
__UpperCAmelCase : Dict = [arr.pop(0 )]
for i, item in enumerate(lowerCamelCase__ ):
if _operator(lowerCamelCase__ , sublist[-1] ):
sublist.append(lowerCamelCase__ )
arr.pop(lowerCamelCase__ )
# merging sublist into solution list
if not solution:
solution.extend(lowerCamelCase__ )
else:
while sublist:
__UpperCAmelCase : int = sublist.pop(0 )
for i, xx in enumerate(lowerCamelCase__ ):
if not _operator(lowerCamelCase__ , lowerCamelCase__ ):
solution.insert(lowerCamelCase__ , lowerCamelCase__ )
break
else:
solution.append(lowerCamelCase__ )
strand_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 168 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None # sigma(t_i)
@classmethod
def __UpperCamelCase ( cls ):
return cls()
@dataclass
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def __UpperCamelCase ( self ):
return True
@register_to_config
def __init__( self , __SCREAMING_SNAKE_CASE = 0.02 , __SCREAMING_SNAKE_CASE = 1_0_0 , __SCREAMING_SNAKE_CASE = 1.007 , __SCREAMING_SNAKE_CASE = 8_0 , __SCREAMING_SNAKE_CASE = 0.05 , __SCREAMING_SNAKE_CASE = 5_0 , ):
pass
def __UpperCamelCase ( self ):
return KarrasVeSchedulerState.create()
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = () ):
snake_case__ : List[Any] = jnp.arange(0 , __SCREAMING_SNAKE_CASE )[::-1].copy()
snake_case__ : Dict = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__SCREAMING_SNAKE_CASE , schedule=jnp.array(__SCREAMING_SNAKE_CASE , dtype=jnp.floataa ) , timesteps=__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
if self.config.s_min <= sigma <= self.config.s_max:
snake_case__ : str = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
snake_case__ : Optional[Any] = 0
# sample eps ~ N(0, S_noise^2 * I)
snake_case__ : Any = random.split(__SCREAMING_SNAKE_CASE , num=1 )
snake_case__ : Any = self.config.s_noise * random.normal(key=__SCREAMING_SNAKE_CASE , shape=sample.shape )
snake_case__ : Tuple = sigma + gamma * sigma
snake_case__ : Tuple = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True , ):
snake_case__ : int = sample_hat + sigma_hat * model_output
snake_case__ : Optional[int] = (sample_hat - pred_original_sample) / sigma_hat
snake_case__ : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__SCREAMING_SNAKE_CASE , derivative=__SCREAMING_SNAKE_CASE , state=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True , ):
snake_case__ : Union[str, Any] = sample_prev + sigma_prev * model_output
snake_case__ : Optional[Any] = (sample_prev - pred_original_sample) / sigma_prev
snake_case__ : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__SCREAMING_SNAKE_CASE , derivative=__SCREAMING_SNAKE_CASE , state=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise NotImplementedError()
| 707 |
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : str = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def __UpperCamelCase ( self ):
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
snake_case__ : str = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def __UpperCamelCase ( self ):
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __UpperCamelCase ( self ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
snake_case__ : int = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def __UpperCamelCase ( self ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
snake_case__ : Any = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def __UpperCamelCase ( self ):
import PIL.Image
snake_case__ : Optional[int] = PIL.Image.fromarray(np.arange(1_0 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=__SCREAMING_SNAKE_CASE ) as mock_cast_to_python_objects:
snake_case__ : Any = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) )
snake_case__ , snake_case__ : Dict = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , __SCREAMING_SNAKE_CASE )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : int ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Tuple = pa.BufferReader(__magic_name__ ) if isinstance(__magic_name__ , pa.Buffer ) else pa.memory_map(__magic_name__ )
snake_case__ : Any = pa.ipc.open_stream(__magic_name__ )
snake_case__ : pa.Table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : str ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[int] = pa.BufferOutputStream()
snake_case__ : Dict = pa.schema(__magic_name__ ) if fields else None
with ArrowWriter(stream=__magic_name__ , schema=__magic_name__ , writer_batch_size=__magic_name__ ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
snake_case__ , snake_case__ : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case__ : int = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__magic_name__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def UpperCamelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Optional[int] = pa.BufferOutputStream()
snake_case__ : Dict = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=__magic_name__ , features=__magic_name__ ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
snake_case__ , snake_case__ : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
snake_case__ : Optional[Any] = pa.BufferReader(output.getvalue() )
snake_case__ : List[str] = pa.ipc.open_stream(__magic_name__ )
snake_case__ : pa.Table = f.read_all()
snake_case__ : Optional[Any] = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__magic_name__ )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
def UpperCamelCase__ ( __magic_name__ : Any ) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[int] = pa.BufferOutputStream()
with ArrowWriter(
stream=__magic_name__ , writer_batch_size=__magic_name__ , hash_salt="""split_name""" , check_duplicates=__magic_name__ , ) as writer:
with pytest.raises(__magic_name__ ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
snake_case__ , snake_case__ : Any = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = pa.BufferOutputStream()
with ArrowWriter(
stream=__magic_name__ , writer_batch_size=__magic_name__ , hash_salt="""split_name""" , check_duplicates=__magic_name__ , ) as writer:
with pytest.raises(__magic_name__ ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 )
snake_case__ , snake_case__ : Tuple = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def UpperCamelCase__ ( __magic_name__ : Optional[int] ) -> str:
'''simple docstring'''
snake_case__ : List[Any] = pa.BufferOutputStream()
with ArrowWriter(
stream=__magic_name__ , writer_batch_size=__magic_name__ , hash_salt="""split_name""" , check_duplicates=__magic_name__ , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
snake_case__ , snake_case__ : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = pa.BufferOutputStream()
snake_case__ : Optional[Any] = pa.schema(__magic_name__ ) if fields else None
with ArrowWriter(stream=__magic_name__ , schema=__magic_name__ , writer_batch_size=__magic_name__ ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
snake_case__ , snake_case__ : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case__ : Optional[Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__magic_name__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def UpperCamelCase__ ( __magic_name__ : Any , __magic_name__ : Optional[Any] ) -> int:
'''simple docstring'''
snake_case__ : int = pa.BufferOutputStream()
snake_case__ : Dict = pa.schema(__magic_name__ ) if fields else None
with ArrowWriter(stream=__magic_name__ , schema=__magic_name__ , writer_batch_size=__magic_name__ ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
snake_case__ , snake_case__ : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case__ : int = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__magic_name__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : str ) -> Dict:
'''simple docstring'''
snake_case__ : int = pa.BufferOutputStream()
snake_case__ : int = pa.schema(__magic_name__ ) if fields else None
with ArrowWriter(stream=__magic_name__ , schema=__magic_name__ , writer_batch_size=__magic_name__ ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
snake_case__ , snake_case__ : Optional[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case__ : List[str] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__magic_name__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def UpperCamelCase__ ( ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : int = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
snake_case__ : Union[str, Any] = os.path.join(__magic_name__ , """test.arrow""" )
with ArrowWriter(path=__magic_name__ , schema=pa.schema(__magic_name__ ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
snake_case__ , snake_case__ : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__magic_name__ , metadata=writer._schema.metadata )
_check_output(__magic_name__ , 1 )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> Any:
'''simple docstring'''
if pa.types.is_list(__magic_name__ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : List[str] ) -> Dict:
'''simple docstring'''
if isinstance(lst[0] , __magic_name__ ):
change_first_primitive_element_in_list(lst[0] , __magic_name__ )
else:
snake_case__ : Tuple = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def UpperCamelCase__ ( __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : List[str] ) -> Any:
'''simple docstring'''
snake_case__ : str = pa.array(TypedSequence(__magic_name__ , optimized_int_type=__magic_name__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : Any , __magic_name__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[int] = pa.array(OptimizedTypedSequence(__magic_name__ , col=__magic_name__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
snake_case__ : Optional[int] = copy.deepcopy(__magic_name__ )
snake_case__ : Tuple = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__magic_name__ , __magic_name__ )
snake_case__ : int = pa.array(OptimizedTypedSequence(__magic_name__ , col=__magic_name__ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def UpperCamelCase__ ( __magic_name__ : Dict , __magic_name__ : Any ) -> List[Any]:
'''simple docstring'''
snake_case__ : Tuple = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=__magic_name__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Dict = """mock://dataset-train.arrow"""
with ArrowWriter(path=__magic_name__ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__magic_name__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
snake_case__ , snake_case__ : Any = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__magic_name__ )
def UpperCamelCase__ ( ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = pa.BufferOutputStream()
with ParquetWriter(stream=__magic_name__ ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
snake_case__ , snake_case__ : str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
snake_case__ : int = pa.BufferReader(output.getvalue() )
snake_case__ : pa.Table = pq.read_table(__magic_name__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def UpperCamelCase__ ( __magic_name__ : Dict , __magic_name__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
import PIL.Image
snake_case__ : List[str] = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__magic_name__ , format="""png""" )
snake_case__ : Union[str, Any] = pa.BufferOutputStream()
with ParquetWriter(
stream=__magic_name__ , features=Features({"""image""": Image()} ) , embed_local_files=__magic_name__ ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
snake_case__ : List[Any] = pa.BufferReader(output.getvalue() )
snake_case__ : pa.Table = pq.read_table(__magic_name__ )
snake_case__ : str = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , __magic_name__ )
with open(__magic_name__ , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def UpperCamelCase__ ( ) -> Dict:
'''simple docstring'''
snake_case__ : Union[str, Any] = pa.schema([pa.field("""col_1""" , pa.string() , nullable=__magic_name__ )] )
snake_case__ : Union[str, Any] = pa.BufferOutputStream()
with ArrowWriter(stream=__magic_name__ ) as writer:
writer._build_writer(inferred_schema=__magic_name__ )
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
| 419 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _a ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
@register_to_config
def __init__( self , _UpperCAmelCase = 768 , ) -> Dict:
super().__init__()
UpperCamelCase_ = nn.Parameter(torch.zeros(1 , _UpperCAmelCase ) )
UpperCamelCase_ = nn.Parameter(torch.ones(1 , _UpperCAmelCase ) )
def _UpperCAmelCase ( self , _UpperCAmelCase = None , _UpperCAmelCase = None , ) -> List[Any]:
UpperCamelCase_ = nn.Parameter(self.mean.to(_UpperCAmelCase ).to(_UpperCAmelCase ) )
UpperCamelCase_ = nn.Parameter(self.std.to(_UpperCAmelCase ).to(_UpperCAmelCase ) )
return self
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Dict:
UpperCamelCase_ = (embeds - self.mean) * 1.0 / self.std
return embeds
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = (embeds * self.std) + self.mean
return embeds
| 23 |
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = """hf-internal-testing/tiny-random-t5"""
lowercase = AutoTokenizer.from_pretrained(_lowerCAmelCase )
lowercase = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase )
lowercase = tokenizer("""This is me""" , return_tensors="""pt""" )
lowercase = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowercase = model.generate(**_lowerCAmelCase )
lowercase = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
lowercase = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowercase = model_reloaded.generate(**_lowerCAmelCase )
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase ) )
def _a ( self ) -> str:
'''simple docstring'''
lowercase = """hf-internal-testing/tiny-random-t5"""
lowercase = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase )
lowercase = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_lowerCAmelCase ):
model.save_pretrained(_lowerCAmelCase )
lowercase = model.reverse_bettertransformer()
model.save_pretrained(_lowerCAmelCase )
| 588 | 0 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__UpperCAmelCase = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__UpperCAmelCase = direct_transformers_import(PATH_TO_TRANSFORMERS)
__UpperCAmelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__UpperCAmelCase = re.compile(R"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
__UpperCAmelCase = {
"DecisionTransformerConfig",
"EncoderDecoderConfig",
"MusicgenConfig",
"RagConfig",
"SpeechEncoderDecoderConfig",
"TimmBackboneConfig",
"VisionEncoderDecoderConfig",
"VisionTextDualEncoderConfig",
"LlamaConfig",
}
def lowerCAmelCase_ ( __A : Any ):
'''simple docstring'''
snake_case: Union[str, Any] = None
# source code of `config_class`
snake_case: Optional[int] = inspect.getsource(__A )
snake_case: str = _re_checkpoint.findall(__A )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('/' ):
snake_case: Optional[int] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
snake_case: int = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
snake_case: List[str] = ckpt_name
break
return checkpoint
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: int = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
snake_case: str = get_checkpoint_from_config_class(__A )
snake_case: Union[str, Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__A )
if len(__A ) > 0:
snake_case: Any = '\n'.join(sorted(__A ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 702 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCAmelCase = "pt"
elif is_tf_available():
__UpperCAmelCase = "tf"
else:
__UpperCAmelCase = "jax"
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = ByTaTokenizer
__UpperCamelCase = False
def _UpperCamelCase ( self ):
'''simple docstring'''
super().setUp()
snake_case: int = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCamelCase ( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=20 , SCREAMING_SNAKE_CASE__=5 ):
'''simple docstring'''
snake_case: Optional[Any] = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
try:
snake_case: Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
snake_case: List[str] = list(filter(lambda SCREAMING_SNAKE_CASE__ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , SCREAMING_SNAKE_CASE__ ) )
snake_case: str = list(filter(lambda SCREAMING_SNAKE_CASE__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) )
if max_length is not None and len(SCREAMING_SNAKE_CASE__ ) > max_length:
snake_case: Union[str, Any] = toks[:max_length]
if min_length is not None and len(SCREAMING_SNAKE_CASE__ ) < min_length and len(SCREAMING_SNAKE_CASE__ ) > 0:
while len(SCREAMING_SNAKE_CASE__ ) < min_length:
snake_case: Tuple = toks + toks
# toks_str = [t[1] for t in toks]
snake_case: Dict = [t[0] for t in toks]
# Ensure consistency
snake_case: int = tokenizer.decode(SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
if " " not in output_txt and len(SCREAMING_SNAKE_CASE__ ) > 1:
snake_case: str = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
)
if with_prefix_space:
snake_case: Tuple = ' ' + output_txt
snake_case: Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
return output_txt, output_ids
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.ta_base_tokenizer
snake_case: str = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
snake_case: List[Any] = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.ta_base_tokenizer
snake_case: Union[str, Any] = 'Unicode €.'
snake_case: str = tokenizer(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded['input_ids'] , SCREAMING_SNAKE_CASE__ )
# decoding
snake_case: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'Unicode €.</s>' )
snake_case: List[Any] = tokenizer('e è é ê ë' )
snake_case: Optional[Any] = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded['input_ids'] , SCREAMING_SNAKE_CASE__ )
# decoding
snake_case: List[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = self.ta_base_tokenizer
snake_case: Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
snake_case: Optional[int] = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
snake_case: str = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if FRAMEWORK != "jax":
snake_case: Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
snake_case: Dict = list(batch.input_ids.tolist()[0] )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.ta_base_tokenizer
snake_case: List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
snake_case: Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , SCREAMING_SNAKE_CASE__ )
self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('decoder_input_ids' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('decoder_attention_mask' , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.ta_base_tokenizer
snake_case: str = [
'Summary of the text.',
'Another summary.',
]
snake_case: Dict = tokenizer(
text_target=SCREAMING_SNAKE_CASE__ , max_length=32 , padding='max_length' , truncation=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.ta_base_tokenizer
snake_case: Optional[int] = ['A long paragraph for summarization. </s>']
snake_case: str = ['Summary of the text. </s>']
# fmt: off
snake_case: str = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
snake_case: Optional[int] = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
snake_case: List[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , text_target=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , batch['input_ids'][0] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , batch['labels'][0] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
snake_case: Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case: Union[str, Any] = tempfile.mkdtemp()
snake_case: Dict = ' He is very happy, UNwant\u00E9d,running'
snake_case: Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Any = after_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case: List[str] = tempfile.mkdtemp()
snake_case: str = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
snake_case: List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
snake_case: int = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = after_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
snake_case: Union[str, Any] = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
snake_case: Any = json.load(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
snake_case: str = json.load(SCREAMING_SNAKE_CASE__ )
snake_case: int = [F"""<extra_id_{i}>""" for i in range(1_25 )]
snake_case: Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
snake_case: str = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
snake_case: Dict = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
snake_case: Union[str, Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=SCREAMING_SNAKE_CASE__ )]
snake_case: Union[str, Any] = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertTrue(tokenizer.decode([2_55] ) == '' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Union[str, Any] = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
snake_case: List[str] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Optional[Any] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
snake_case: Dict = 0
snake_case: List[Any] = tokenizer.convert_ids_to_tokens(
SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
for attr in attributes_list:
setattr(SCREAMING_SNAKE_CASE__ , attr + '_id' , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , attr + '_id' ) , SCREAMING_SNAKE_CASE__ )
setattr(SCREAMING_SNAKE_CASE__ , attr + '_id' , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , attr + '_id' ) , SCREAMING_SNAKE_CASE__ )
setattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' ) , [] )
setattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] ) | 692 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase_ : List[Any] = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class _lowerCamelCase ( unittest.TestCase ):
__a = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__a = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__a = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__a = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: str= ZeroShotClassificationPipeline(
model=lowerCAmelCase , tokenizer=lowerCAmelCase , candidate_labels=['''polics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__: List[Any]= classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(lowerCAmelCase , {'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase )]} )
# No kwarg
SCREAMING_SNAKE_CASE__: int= classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(lowerCAmelCase , {'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase )]} )
SCREAMING_SNAKE_CASE__: List[Any]= classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(lowerCAmelCase , {'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase )]} )
SCREAMING_SNAKE_CASE__: Dict= classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
lowerCAmelCase , {'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
SCREAMING_SNAKE_CASE__: Optional[Any]= classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
lowerCAmelCase , {'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
SCREAMING_SNAKE_CASE__: Optional[Any]= classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(lowerCAmelCase , {'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase )]} )
# https://github.com/huggingface/transformers/issues/13846
SCREAMING_SNAKE_CASE__: str= classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
lowerCAmelCase , [
{'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )]}
for i in range(1 )
] , )
SCREAMING_SNAKE_CASE__: Tuple= classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
lowerCAmelCase , [
{'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )]}
for i in range(2 )
] , )
with self.assertRaises(lowerCAmelCase ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(lowerCAmelCase ):
classifier(lowerCAmelCase , candidate_labels='''politics''' )
with self.assertRaises(lowerCAmelCase ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(lowerCAmelCase ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=lowerCAmelCase )
with self.assertRaises(lowerCAmelCase ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(lowerCAmelCase ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=lowerCAmelCase , )
self.run_entailment_id(lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Optional[Any]= zero_shot_classifier.model.config
SCREAMING_SNAKE_CASE__: str= config.labelaid
SCREAMING_SNAKE_CASE__: int= zero_shot_classifier.entailment_id
SCREAMING_SNAKE_CASE__: str= {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
SCREAMING_SNAKE_CASE__: Any= {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
SCREAMING_SNAKE_CASE__: List[Any]= {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
SCREAMING_SNAKE_CASE__: Tuple= {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
SCREAMING_SNAKE_CASE__: Dict= original_labelaid
self.assertEqual(lowerCAmelCase , zero_shot_classifier.entailment_id )
@require_torch
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: Union[str, Any]= pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: Optional[int]= pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
SCREAMING_SNAKE_CASE__: Any= zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
def UpperCamelCase_ ( self ) -> str:
SCREAMING_SNAKE_CASE__: Tuple= pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
SCREAMING_SNAKE_CASE__: List[str]= zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: int= pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
SCREAMING_SNAKE_CASE__: List[Any]= zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
SCREAMING_SNAKE_CASE__: Dict= zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=lowerCAmelCase , )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: Any= pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
SCREAMING_SNAKE_CASE__: Tuple= zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
SCREAMING_SNAKE_CASE__: Any= zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=lowerCAmelCase , )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
| 64 |
import inspect
import unittest
class UpperCamelCase( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
__snake_case = inspect.getmembers(SCREAMING_SNAKE_CASE , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
__snake_case = "k-diffusion"
elif backend == "invisible_watermark":
__snake_case = "invisible-watermark"
assert backend in deps, f'''{backend} is not in the deps table!'''
| 371 | 0 |
"""simple docstring"""
import torch
def UpperCamelCase ( ):
if torch.cuda.is_available():
__a = torch.cuda.device_count()
else:
__a = 0
print(f"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 708 | """simple docstring"""
__A = 6_55_21
def UpperCamelCase ( _lowerCAmelCase : str ):
__a = 1
__a = 0
for plain_chr in plain_text:
__a = (a + ord(_lowerCAmelCase )) % MOD_ADLER
__a = (b + a) % MOD_ADLER
return (b << 16) | a
| 173 | 0 |
'''simple docstring'''
def _a ( _lowerCamelCase , _lowerCamelCase ) -> int:
"""simple docstring"""
while a != 0:
__snake_case , __snake_case : str = b % a, a
return b
def _a ( _lowerCamelCase , _lowerCamelCase ) -> int:
"""simple docstring"""
if gcd(_lowerCamelCase , _lowerCamelCase ) != 1:
__snake_case : Any = F'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(_lowerCamelCase )
__snake_case , __snake_case , __snake_case : Dict = 1, 0, a
__snake_case , __snake_case , __snake_case : List[Any] = 0, 1, m
while va != 0:
__snake_case : List[str] = ua // va
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
| 26 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a : List[str] = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
a : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 613 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase: Optional[int] = logging.get_logger(__name__)
UpperCAmelCase: Union[str, Any] = {
"""kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
"""kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
"""kssteven/ibert-roberta-large-mnli""": (
"""https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
),
}
class UpperCamelCase ( __lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = 'ibert'
def __init__( self ,UpperCAmelCase_=3_05_22 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=2 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=1 ,UpperCAmelCase_=0 ,UpperCAmelCase_=2 ,UpperCAmelCase_="absolute" ,UpperCAmelCase_=False ,UpperCAmelCase_="none" ,**UpperCAmelCase_ ,):
super().__init__(pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,**__A )
_lowercase : Tuple = vocab_size
_lowercase : Optional[int] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : List[str] = num_attention_heads
_lowercase : Union[str, Any] = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Tuple = hidden_dropout_prob
_lowercase : Tuple = attention_probs_dropout_prob
_lowercase : Optional[Any] = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : List[Any] = initializer_range
_lowercase : Dict = layer_norm_eps
_lowercase : Any = position_embedding_type
_lowercase : List[Any] = quant_mode
_lowercase : List[str] = force_dequant
class UpperCamelCase ( __lowercase ):
"""simple docstring"""
@property
def lowerCamelCase__ ( self ):
if self.task == "multiple-choice":
_lowercase : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_lowercase : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 714 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase: List[Any] = logging.get_logger(__name__)
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = "upernet"
def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=[1, 2, 3, 6] ,UpperCAmelCase_=True ,UpperCAmelCase_=0.4 ,UpperCAmelCase_=3_84 ,UpperCAmelCase_=2_56 ,UpperCAmelCase_=1 ,UpperCAmelCase_=False ,UpperCAmelCase_=2_55 ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_lowercase : List[str] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Optional[int] = backbone_config.get("""model_type""" )
_lowercase : List[Any] = CONFIG_MAPPING[backbone_model_type]
_lowercase : str = config_class.from_dict(UpperCAmelCase_ )
_lowercase : Union[str, Any] = backbone_config
_lowercase : Dict = hidden_size
_lowercase : int = initializer_range
_lowercase : Tuple = pool_scales
_lowercase : Dict = use_auxiliary_head
_lowercase : Optional[Any] = auxiliary_loss_weight
_lowercase : List[str] = auxiliary_in_channels
_lowercase : List[str] = auxiliary_channels
_lowercase : Optional[int] = auxiliary_num_convs
_lowercase : List[Any] = auxiliary_concat_input
_lowercase : List[Any] = loss_ignore_index
def lowerCamelCase__ ( self ):
_lowercase : int = copy.deepcopy(self.__dict__ )
_lowercase : List[Any] = self.backbone_config.to_dict()
_lowercase : int = self.__class__.model_type
return output
| 600 | 0 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int = None , UpperCAmelCase_ : int = None ):
super().__init__()
SCREAMING_SNAKE_CASE : Union[str, Any] = pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = max_length
SCREAMING_SNAKE_CASE : Optional[int] = vocab
SCREAMING_SNAKE_CASE : Optional[Any] = merges
SCREAMING_SNAKE_CASE : int = BytePairTokenizer(UpperCAmelCase_ , UpperCAmelCase_ , sequence_length=UpperCAmelCase_ )
@classmethod
def _A ( cls : Tuple , UpperCAmelCase_ : GPTaTokenizer , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE : Optional[Any] = [" ".join(UpperCAmelCase_ ) for m in tokenizer.bpe_ranks.keys()]
SCREAMING_SNAKE_CASE : str = tokenizer.get_vocab()
return cls(UpperCAmelCase_ , UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
@classmethod
def _A ( cls : str , UpperCAmelCase_ : Union[str, os.PathLike] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer.from_pretrained(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
return cls.from_tokenizer(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
@classmethod
def _A ( cls : Union[str, Any] , UpperCAmelCase_ : str ):
return cls(**UpperCAmelCase_ )
def _A ( self : List[Any] ):
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def _A ( self : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int = None ):
SCREAMING_SNAKE_CASE : Any = self.tf_tokenizer(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = tf.ones_like(UpperCAmelCase_ )
if self.pad_token_id is not None:
# pad the tokens up to max length
SCREAMING_SNAKE_CASE : Tuple = max_length if max_length is not None else self.max_length
if max_length is not None:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = pad_model_inputs(
UpperCAmelCase_ , max_seq_length=UpperCAmelCase_ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 62 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = os.path.join(args.tf_model_dir , "parameters.json" )
SCREAMING_SNAKE_CASE : Union[str, Any] = json.loads(open(lowercase ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith(".pt" ):
SCREAMING_SNAKE_CASE : Optional[int] = args.output + ".pt"
SCREAMING_SNAKE_CASE : Any = OrderedDict()
with tf.device("/CPU:0" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.train.load_checkpoint(args.tf_model_dir )
SCREAMING_SNAKE_CASE : Union[str, Any] = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
SCREAMING_SNAKE_CASE : Any = reader.get_tensor(lowercase ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
SCREAMING_SNAKE_CASE : Optional[int] = 8
SCREAMING_SNAKE_CASE : List[Any] = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
SCREAMING_SNAKE_CASE : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(lowercase )
elif key_name.startswith("model/moe" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
SCREAMING_SNAKE_CASE : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Any = torch.tensor(lowercase )
elif key_name.endswith("/softmlp/kernel" ):
SCREAMING_SNAKE_CASE : Dict = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
SCREAMING_SNAKE_CASE : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(lowercase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
SCREAMING_SNAKE_CASE : Optional[int] = key_name[-9:-7]
for i in range(16 ):
SCREAMING_SNAKE_CASE : List[Any] = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
SCREAMING_SNAKE_CASE : List[str] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(lowercase )
elif key_name.startswith("model/mlp" ):
SCREAMING_SNAKE_CASE : str = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
SCREAMING_SNAKE_CASE : Dict = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
SCREAMING_SNAKE_CASE : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Any = torch.tensor(lowercase )
elif key_name.endswith("/p1/bias" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
SCREAMING_SNAKE_CASE : Optional[Any] = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(lowercase )
elif key_name.endswith("/p2/kernel" ):
SCREAMING_SNAKE_CASE : str = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
SCREAMING_SNAKE_CASE : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(lowercase )
elif key_name.endswith("/p2/bias" ):
SCREAMING_SNAKE_CASE : Tuple = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
SCREAMING_SNAKE_CASE : str = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(lowercase )
elif key_name.startswith("model/ln" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = "model.blocks.%d.feed_forward.norm.bias" % player
SCREAMING_SNAKE_CASE : Union[str, Any] = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(lowercase )
elif key_name.endswith("/g" ):
SCREAMING_SNAKE_CASE : List[str] = "model.blocks.%d.feed_forward.norm.weight" % player
SCREAMING_SNAKE_CASE : Optional[Any] = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : Dict = torch.tensor(lowercase )
elif key_name.startswith("model/att" ):
SCREAMING_SNAKE_CASE : Optional[int] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
SCREAMING_SNAKE_CASE : List[str] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
SCREAMING_SNAKE_CASE : List[str] = state[:, 0, :, :]
SCREAMING_SNAKE_CASE : Tuple = state[:, 1, :, :]
SCREAMING_SNAKE_CASE : List[Any] = state[:, 2, :, :]
SCREAMING_SNAKE_CASE : Tuple = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : List[Any] = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Union[str, Any] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : int = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(lowercase )
elif key_name.endswith("/o/kernel" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
SCREAMING_SNAKE_CASE : Optional[int] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : str = torch.tensor(lowercase )
elif key_name.startswith("model/an" ):
SCREAMING_SNAKE_CASE : int = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
SCREAMING_SNAKE_CASE : List[Any] = "model.blocks.%d.self_attn.norm.bias" % player
SCREAMING_SNAKE_CASE : int = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowercase )
elif key_name.endswith("/g" ):
SCREAMING_SNAKE_CASE : Tuple = "model.blocks.%d.self_attn.norm.weight" % player
SCREAMING_SNAKE_CASE : List[str] = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : str = torch.tensor(lowercase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
SCREAMING_SNAKE_CASE : str = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
SCREAMING_SNAKE_CASE : List[str] = "model.%s.weight" % nlayer
SCREAMING_SNAKE_CASE : Union[str, Any] = vnp.copy() # same in embedded
SCREAMING_SNAKE_CASE : str = torch.tensor(lowercase )
if key_name.startswith("model/wte" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = "lm_head.weight"
SCREAMING_SNAKE_CASE : List[Any] = vnp.copy() # same in embedded
SCREAMING_SNAKE_CASE : Dict = torch.tensor(lowercase )
elif key_name.startswith("model/wob" ):
SCREAMING_SNAKE_CASE : List[Any] = "final_logits_bias"
SCREAMING_SNAKE_CASE : Optional[Any] = vnp.copy() # same in embedded
SCREAMING_SNAKE_CASE : List[str] = state.reshape((1, -1) )
SCREAMING_SNAKE_CASE : int = torch.tensor(lowercase )
elif key_name == "model/dense/kernel":
SCREAMING_SNAKE_CASE : Optional[int] = "model.last_project.weight"
SCREAMING_SNAKE_CASE : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(lowercase )
elif key_name == "model/dense_1/bias":
SCREAMING_SNAKE_CASE : str = "model.last_project.bias"
SCREAMING_SNAKE_CASE : int = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : str = torch.tensor(lowercase )
torch.save(lowercase , args.output )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
snake_case = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 62 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'MCTCTFeatureExtractor'
lowercase__ = 'AutoTokenizer'
def __init__( self , __a , __a) -> Optional[int]:
'''simple docstring'''
super().__init__(__a , __a)
_UpperCamelCase = self.feature_extractor
_UpperCamelCase = False
def __call__( self , *__a , **__a) -> Any:
'''simple docstring'''
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__a , **__a)
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''')
_UpperCamelCase = kwargs.pop('''raw_speech''')
else:
_UpperCamelCase = kwargs.pop('''audio''' , __a)
_UpperCamelCase = kwargs.pop('''sampling_rate''' , __a)
_UpperCamelCase = kwargs.pop('''text''' , __a)
if len(__a) > 0:
_UpperCamelCase = args[0]
_UpperCamelCase = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''')
if audio is not None:
_UpperCamelCase = self.feature_extractor(__a , *__a , sampling_rate=__a , **__a)
if text is not None:
_UpperCamelCase = self.tokenizer(__a , **__a)
if text is None:
return inputs
elif audio is None:
return encodings
else:
_UpperCamelCase = encodings['''input_ids''']
return inputs
def UpperCAmelCase ( self , *__a , **__a) -> Dict:
'''simple docstring'''
return self.tokenizer.batch_decode(*__a , **__a)
def UpperCAmelCase ( self , *__a , **__a) -> Optional[int]:
'''simple docstring'''
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*__a , **__a)
_UpperCamelCase = kwargs.pop('''input_features''' , __a)
_UpperCamelCase = kwargs.pop('''labels''' , __a)
if len(__a) > 0:
_UpperCamelCase = args[0]
_UpperCamelCase = args[1:]
if input_features is not None:
_UpperCamelCase = self.feature_extractor.pad(__a , *__a , **__a)
if labels is not None:
_UpperCamelCase = self.tokenizer.pad(__a , **__a)
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_UpperCamelCase = labels['''input_ids''']
return input_features
def UpperCAmelCase ( self , *__a , **__a) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.decode(*__a , **__a)
@contextmanager
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''')
_UpperCamelCase = True
_UpperCamelCase = self.tokenizer
yield
_UpperCamelCase = self.feature_extractor
_UpperCamelCase = False
| 720 |
"""simple docstring"""
import copy
import re
class _UpperCAmelCase:
lowercase__ = 'hp'
lowercase__ = {}
lowercase__ = None
@classmethod
def UpperCAmelCase ( cls , __a , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = prefix
_UpperCamelCase = defaults
cls.build_naming_info()
@staticmethod
def UpperCAmelCase ( __a , __a) -> Union[str, Any]:
'''simple docstring'''
if len(__a) == 0:
return ""
_UpperCamelCase = None
if any(char.isdigit() for char in word):
raise Exception(F'''Parameters should not contain numbers: \'{word}\' contains a number''')
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__a) + 1):
_UpperCamelCase = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
_UpperCamelCase = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__a):
_UpperCamelCase = ''''''
while integer != 0:
_UpperCamelCase = chr(ord('''A''') + integer % 10) + s
integer //= 10
return s
_UpperCamelCase = 0
while True:
_UpperCamelCase = word + '''#''' + int_to_alphabetic(__a)
if sword in info["reverse_short_word"]:
continue
else:
_UpperCamelCase = sword
break
_UpperCamelCase = short_word
_UpperCamelCase = word
return short_word
@staticmethod
def UpperCAmelCase ( __a , __a) -> Any:
'''simple docstring'''
_UpperCamelCase = param_name.split('''_''')
_UpperCamelCase = [TrialShortNamer.shortname_for_word(__a , __a) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
_UpperCamelCase = ['''''', '''_''']
for separator in separators:
_UpperCamelCase = separator.join(__a)
if shortname not in info["reverse_short_param"]:
_UpperCamelCase = shortname
_UpperCamelCase = param_name
return shortname
return param_name
@staticmethod
def UpperCAmelCase ( __a , __a) -> List[str]:
'''simple docstring'''
_UpperCamelCase = TrialShortNamer.shortname_for_key(__a , __a)
_UpperCamelCase = short_name
_UpperCamelCase = param_name
@classmethod
def UpperCAmelCase ( cls) -> Any:
'''simple docstring'''
if cls.NAMING_INFO is not None:
return
_UpperCamelCase = {
'''short_word''': {},
'''reverse_short_word''': {},
'''short_param''': {},
'''reverse_short_param''': {},
}
_UpperCamelCase = list(cls.DEFAULTS.keys())
for k in field_keys:
cls.add_new_param_name(__a , __a)
_UpperCamelCase = info
@classmethod
def UpperCAmelCase ( cls , __a) -> Optional[Any]:
'''simple docstring'''
cls.build_naming_info()
assert cls.PREFIX is not None
_UpperCamelCase = [copy.copy(cls.PREFIX)]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F'''You should provide a default value for the param name {k} with value {v}''')
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
_UpperCamelCase = cls.NAMING_INFO['''short_param'''][k]
if isinstance(__a , __a):
_UpperCamelCase = 1 if v else 0
_UpperCamelCase = '''''' if isinstance(__a , (int, float)) else '''-'''
_UpperCamelCase = F'''{key}{sep}{v}'''
name.append(__a)
return "_".join(__a)
@classmethod
def UpperCAmelCase ( cls , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = repr[len(cls.PREFIX) + 1 :]
if repr == "":
_UpperCamelCase = []
else:
_UpperCamelCase = repr.split('''_''')
_UpperCamelCase = {}
for value in values:
if "-" in value:
_UpperCamelCase , _UpperCamelCase = value.split('''-''')
else:
_UpperCamelCase = re.sub('''[0-9.]''' , '''''' , __a)
_UpperCamelCase = float(re.sub('''[^0-9.]''' , '''''' , __a))
_UpperCamelCase = cls.NAMING_INFO['''reverse_short_param'''][p_k]
_UpperCamelCase = p_v
for k in cls.DEFAULTS:
if k not in parameters:
_UpperCamelCase = cls.DEFAULTS[k]
return parameters
| 78 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class snake_case ( lowerCamelCase_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = """trocr"""
snake_case_ : str = ["""past_key_values"""]
snake_case_ : Tuple = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__( self : Any , lowerCAmelCase : Any=5_0265 , lowerCAmelCase : Optional[int]=1024 , lowerCAmelCase : Dict=12 , lowerCAmelCase : Tuple=16 , lowerCAmelCase : str=4096 , lowerCAmelCase : List[Any]="gelu" , lowerCAmelCase : Dict=512 , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Optional[Any]=0.02 , lowerCAmelCase : int=0.0 , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : Any=1 , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[Any]=2 , **lowerCAmelCase : Tuple , ) -> Any:
"""simple docstring"""
_snake_case : int = vocab_size
_snake_case : Dict = d_model
_snake_case : Optional[int] = decoder_layers
_snake_case : Optional[Any] = decoder_attention_heads
_snake_case : Optional[Any] = decoder_ffn_dim
_snake_case : int = activation_function
_snake_case : Tuple = max_position_embeddings
_snake_case : Any = dropout
_snake_case : Tuple = attention_dropout
_snake_case : str = activation_dropout
_snake_case : Optional[int] = init_std
_snake_case : List[str] = decoder_layerdrop
_snake_case : Union[str, Any] = use_cache
_snake_case : List[Any] = scale_embedding
_snake_case : str = use_learned_position_embeddings
_snake_case : Optional[int] = layernorm_embedding
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , decoder_start_token_id=a_ , **a_ , )
| 477 |
"""simple docstring"""
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , a_ : int )-> str:
"""simple docstring"""
UpperCAmelCase_ : Any = n
UpperCAmelCase_ : str = [None] * self.n
UpperCAmelCase_ : List[Any] = 0 # index of the first element
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : Dict = 0
def __len__( self : Union[str, Any] )-> int:
"""simple docstring"""
return self.size
def a ( self : Dict )-> bool:
"""simple docstring"""
return self.size == 0
def a ( self : List[Any] )-> Optional[int]:
"""simple docstring"""
return False if self.is_empty() else self.array[self.front]
def a ( self : Dict , a_ : int )-> Optional[int]:
"""simple docstring"""
if self.size >= self.n:
raise Exception("""QUEUE IS FULL""" )
UpperCAmelCase_ : List[str] = data
UpperCAmelCase_ : Optional[int] = (self.rear + 1) % self.n
self.size += 1
return self
def a ( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
if self.size == 0:
raise Exception("""UNDERFLOW""" )
UpperCAmelCase_ : Tuple = self.array[self.front]
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = (self.front + 1) % self.n
self.size -= 1
return temp
| 470 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowerCamelCase__ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.weight", F"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Any:
'''simple docstring'''
_UpperCamelCase = state_dict.pop(a__ )
_UpperCamelCase = val
def lowerCAmelCase__ ( a__ ) ->str:
'''simple docstring'''
_UpperCamelCase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_UpperCamelCase = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
_UpperCamelCase = value
else:
_UpperCamelCase = value
return new_state_dict
def lowerCAmelCase__ ( a__ ) ->Dict:
'''simple docstring'''
_UpperCamelCase = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_UpperCamelCase = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
_UpperCamelCase = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase = in_proj_weight[:256, :]
_UpperCamelCase = in_proj_bias[:256]
_UpperCamelCase = in_proj_weight[256:512, :]
_UpperCamelCase = in_proj_bias[256:512]
_UpperCamelCase = in_proj_weight[-256:, :]
_UpperCamelCase = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_UpperCamelCase = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
_UpperCamelCase = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase = in_proj_weight[:256, :]
_UpperCamelCase = in_proj_bias[:256]
_UpperCamelCase = in_proj_weight[256:512, :]
_UpperCamelCase = in_proj_bias[256:512]
_UpperCamelCase = in_proj_weight[-256:, :]
_UpperCamelCase = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_UpperCamelCase = state_dict.pop(
f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
_UpperCamelCase = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_UpperCamelCase = in_proj_weight_cross_attn[:256, :]
_UpperCamelCase = in_proj_bias_cross_attn[:256]
_UpperCamelCase = in_proj_weight_cross_attn[256:512, :]
_UpperCamelCase = in_proj_bias_cross_attn[256:512]
_UpperCamelCase = in_proj_weight_cross_attn[-256:, :]
_UpperCamelCase = in_proj_bias_cross_attn[-256:]
def lowerCAmelCase__ ( a__ , a__ ) ->Dict:
'''simple docstring'''
_UpperCamelCase = image.size
_UpperCamelCase = max(a__ , a__ )
_UpperCamelCase = 800 if '''detection''' in checkpoint_url else 1_000
_UpperCamelCase = target_max_size / current_max_size
_UpperCamelCase = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def lowerCAmelCase__ ( a__ ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase = F.to_tensor(a__ )
_UpperCamelCase = F.normalize(a__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->List[str]:
'''simple docstring'''
logger.info("Converting model..." )
# load original state dict
_UpperCamelCase = torch.hub.load_state_dict_from_url(a__ , map_location="cpu" )
# rename keys
for src, dest in rename_keys:
rename_key(a__ , a__ , a__ )
_UpperCamelCase = rename_backbone_keys(a__ )
# query, key and value matrices need special treatment
read_in_q_k_v(a__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_UpperCamelCase = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_UpperCamelCase = state_dict.pop(a__ )
_UpperCamelCase = val
# create HuggingFace model and load state dict
_UpperCamelCase = TableTransformerConfig(
backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
_UpperCamelCase = 15
_UpperCamelCase = 2
_UpperCamelCase = {0: '''table''', 1: '''table rotated'''}
_UpperCamelCase = idalabel
_UpperCamelCase = {v: k for k, v in idalabel.items()}
else:
_UpperCamelCase = 125
_UpperCamelCase = 6
_UpperCamelCase = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
_UpperCamelCase = idalabel
_UpperCamelCase = {v: k for k, v in idalabel.items()}
_UpperCamelCase = DetrImageProcessor(
format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1_000 )
_UpperCamelCase = TableTransformerForObjectDetection(a__ )
model.load_state_dict(a__ )
model.eval()
# verify our conversion
_UpperCamelCase = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
_UpperCamelCase = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=a__ )
_UpperCamelCase = Image.open(a__ ).convert("RGB" )
_UpperCamelCase = normalize(resize(a__ , a__ ) ).unsqueeze(0 )
_UpperCamelCase = model(a__ )
if "detection" in checkpoint_url:
_UpperCamelCase = (1, 15, 3)
_UpperCamelCase = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
_UpperCamelCase = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
_UpperCamelCase = (1, 125, 7)
_UpperCamelCase = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
_UpperCamelCase = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , a__ , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , a__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(a__ ).mkdir(exist_ok=a__ )
model.save_pretrained(a__ )
image_processor.save_pretrained(a__ )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
_UpperCamelCase = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(a__ )
image_processor.push_to_hub(a__ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase__ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 717 | import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : str = None , lowercase_ : uuid.UUID = None , lowercase_ : List[Any]=None , lowercase_ : int=None) -> Dict:
"""simple docstring"""
if not conversation_id:
_UpperCamelCase = uuid.uuida()
if past_user_inputs is None:
_UpperCamelCase = []
if generated_responses is None:
_UpperCamelCase = []
_UpperCamelCase = conversation_id
_UpperCamelCase = past_user_inputs
_UpperCamelCase = generated_responses
_UpperCamelCase = text
def __eq__( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : str , lowercase_ : bool = False) -> Any:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
f'with: "{text}".')
_UpperCamelCase = text
else:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
else:
_UpperCamelCase = text
def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
_UpperCamelCase = None
def __UpperCAmelCase ( self : Dict , lowercase_ : str) -> Optional[Any]:
"""simple docstring"""
self.generated_responses.append(lowercase_)
def __UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Union[str, Any]) -> int:
"""simple docstring"""
_UpperCamelCase = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
_UpperCamelCase = "user" if is_user else "bot"
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
lowerCAmelCase, R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''', )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : str) -> List[str]:
"""simple docstring"""
super().__init__(*lowercase_ , **lowercase_)
if self.tokenizer.pad_token_id is None:
_UpperCamelCase = self.tokenizer.eos_token
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any]=None , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : str) -> Tuple:
"""simple docstring"""
_UpperCamelCase = {}
_UpperCamelCase = {}
_UpperCamelCase = {}
if min_length_for_response is not None:
_UpperCamelCase = min_length_for_response
if minimum_tokens is not None:
_UpperCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
_UpperCamelCase = generate_kwargs["max_length"]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowercase_)
return preprocess_params, forward_params, postprocess_params
def __call__( self : Any , lowercase_ : Union[Conversation, List[Conversation]] , lowercase_ : str=0 , **lowercase_ : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = super().__call__(lowercase_ , num_workers=lowercase_ , **lowercase_)
if isinstance(lowercase_ , lowercase_) and len(lowercase_) == 1:
return outputs[0]
return outputs
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Conversation , lowercase_ : Any=32) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_):
raise ValueError("ConversationalPipeline, expects Conversation as inputs")
if conversation.new_user_input is None:
raise ValueError(
f'Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. '
"Add user inputs with the conversation's `add_user_input` method")
if hasattr(self.tokenizer , "_build_conversation_input_ids"):
_UpperCamelCase = self.tokenizer._build_conversation_input_ids(lowercase_)
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_UpperCamelCase = self._legacy_parse_and_tokenize(lowercase_)
if self.framework == "pt":
_UpperCamelCase = torch.LongTensor([input_ids])
elif self.framework == "tf":
_UpperCamelCase = tf.constant([input_ids])
return {"input_ids": input_ids, "conversation": conversation}
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[int]=10 , **lowercase_ : Dict) -> List[str]:
"""simple docstring"""
_UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
_UpperCamelCase = model_inputs["input_ids"].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})')
_UpperCamelCase = max_length - minimum_tokens
_UpperCamelCase = model_inputs["input_ids"][:, -trim:]
if "attention_mask" in model_inputs:
_UpperCamelCase = model_inputs["attention_mask"][:, -trim:]
_UpperCamelCase = model_inputs.pop("conversation")
_UpperCamelCase = max_length
_UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
if self.model.config.is_encoder_decoder:
_UpperCamelCase = 1
else:
_UpperCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int=True) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = model_outputs["output_ids"]
_UpperCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
_UpperCamelCase = model_outputs["conversation"]
conversation.mark_processed()
conversation.append_response(lowercase_)
return conversation
def __UpperCAmelCase ( self : Any , lowercase_ : Conversation) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.tokenizer.eos_token_id
_UpperCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) + [eos_token_id])
else:
input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_))
if len(lowercase_) > self.tokenizer.model_max_length:
_UpperCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 82 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Optional[Any] = DebertaTokenizer
snake_case__ :int = True
snake_case__ :Dict = DebertaTokenizerFast
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase__ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
lowerCAmelCase__ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowerCAmelCase__ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCAmelCase__ = {"unk_token": "[UNK]"}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__magic_name__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__magic_name__ ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = "lower newer"
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
lowerCAmelCase__ = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = tokens + [tokenizer.unk_token]
lowerCAmelCase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = tokenizer("Hello" , "World" )
lowerCAmelCase__ = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["token_type_ids"] , __magic_name__ )
@slow
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
lowerCAmelCase__ = tokenizer.encode("sequence builders" , add_special_tokens=__magic_name__ )
lowerCAmelCase__ = tokenizer.encode("multi-sequence build" , add_special_tokens=__magic_name__ )
lowerCAmelCase__ = tokenizer.encode(
"sequence builders" , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
lowerCAmelCase__ = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(__magic_name__ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCAmelCase__ = tokenizer_class.from_pretrained("microsoft/deberta-base" )
lowerCAmelCase__ = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
lowerCAmelCase__ = tokenizer(__magic_name__ , padding=__magic_name__ )
lowerCAmelCase__ = [tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) for seq in encoding["input_ids"]]
# fmt: off
lowerCAmelCase__ = {
"input_ids": [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCAmelCase__ = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
self.assertDictEqual(encoding.data , __magic_name__ )
for expected, decoded in zip(__magic_name__ , __magic_name__ ):
self.assertEqual(__magic_name__ , __magic_name__ )
| 48 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class A :
'''simple docstring'''
A__ = 42
A__ = None
A__ = None
def UpperCamelCase ( ) -> Node | None:
"""simple docstring"""
lowercase__ = Node(1 )
lowercase__ = Node(2 )
lowercase__ = Node(3 )
lowercase__ = Node(4 )
lowercase__ = Node(5 )
return tree
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> int:
"""simple docstring"""
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def UpperCamelCase ( __magic_name__ : Node | None ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def UpperCamelCase ( __magic_name__ : Node | None , __magic_name__ : int ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
def populate_output(__magic_name__ : Node | None , __magic_name__ : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(__magic_name__ , __magic_name__ )
return output
def UpperCamelCase ( __magic_name__ : Node | None , __magic_name__ : int ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
def populate_output(__magic_name__ : Node | None , __magic_name__ : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(__magic_name__ , __magic_name__ )
return output
def UpperCamelCase ( __magic_name__ : Node | None ) -> Sequence[Node | None] | list[Any]:
"""simple docstring"""
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(__magic_name__ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(__magic_name__ , __magic_name__ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(__magic_name__ , __magic_name__ ) )
lowercase__ = 0
return output
def UpperCamelCase ( ) -> None: # Main function for testing.
"""simple docstring"""
lowercase__ = make_tree()
print(f'''In-order Traversal: {inorder(__magic_name__ )}''' )
print(f'''Pre-order Traversal: {preorder(__magic_name__ )}''' )
print(f'''Post-order Traversal: {postorder(__magic_name__ )}''' , """\n""" )
print(f'''Height of Tree: {height(__magic_name__ )}''' , """\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(__magic_name__ ) , """\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 , height(__magic_name__ ) + 1 ):
print(f'''Level {level}:''' , get_nodes_from_left_to_right(__magic_name__ , level=__magic_name__ ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(__magic_name__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 15 | 0 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self , snake_case_ ) -> Optional[int]:
__lowerCAmelCase = 3
__lowerCAmelCase = 250
__lowerCAmelCase = ids_tensor((batch_size, length) , snake_case_ )
__lowerCAmelCase = torch.ones((batch_size, length) , device=snake_case_ , dtype=torch.float ) / length
return input_ids, scores
def A__ ( self ) -> str:
__lowerCAmelCase , __lowerCAmelCase = self._get_tensors(5 )
__lowerCAmelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(snake_case_ , snake_case_ ) )
__lowerCAmelCase , __lowerCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(snake_case_ , snake_case_ ) )
__lowerCAmelCase , __lowerCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(snake_case_ , snake_case_ ) )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = MaxLengthCriteria(max_length=10 )
__lowerCAmelCase , __lowerCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(snake_case_ , snake_case_ ) )
__lowerCAmelCase , __lowerCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(snake_case_ , snake_case_ ) )
__lowerCAmelCase , __lowerCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(snake_case_ , snake_case_ ) )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
__lowerCAmelCase , __lowerCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(snake_case_ , snake_case_ ) )
__lowerCAmelCase , __lowerCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(snake_case_ , snake_case_ ) )
__lowerCAmelCase , __lowerCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(snake_case_ , snake_case_ ) )
__lowerCAmelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def A__ ( self ) -> int:
__lowerCAmelCase , __lowerCAmelCase = self._get_tensors(5 )
__lowerCAmelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(snake_case_ , snake_case_ ) )
__lowerCAmelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(snake_case_ , snake_case_ ) )
def A__ ( self ) -> Dict:
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(snake_case_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
__lowerCAmelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(snake_case_ ) , 1 )
| 715 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def lowercase (_lowerCAmelCase ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
__lowerCAmelCase = precision
__lowerCAmelCase = ceil(precision / 14 )
__lowerCAmelCase = 42_6880 * Decimal(1_0005 ).sqrt()
__lowerCAmelCase = 1
__lowerCAmelCase = 1359_1409
__lowerCAmelCase = Decimal(_lowerCAmelCase )
for k in range(1 , _lowerCAmelCase ):
__lowerCAmelCase = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowerCAmelCase ) ** 3)
linear_term += 5_4514_0134
exponential_term *= -26_2537_4126_4076_8000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = 50
print(F"The first {n} digits of pi is: {pi(n)}")
| 573 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : str = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Union[str, Any] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 543 |
"""simple docstring"""
import random
def a__ ( snake_case__ , snake_case__ , snake_case__ = False ) -> dict:
lowerCamelCase = {i: [] for i in range(snake_case__ )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(snake_case__ )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(snake_case__ ):
for j in range(i + 1 , snake_case__ ):
if random.random() < probability:
graph[i].append(snake_case__ )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(snake_case__ )
return graph
def a__ ( snake_case__ ) -> dict:
return {
i: [j for j in range(snake_case__ ) if i != j] for i in range(snake_case__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 543 | 1 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__A : List[Any] = pytest.mark.integration
@require_faiss
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = Dataset.from_dict({"""filename""": ["""my_name-train""" + """_""" + str(_a ) for x in np.arange(30 ).tolist()]} )
return dset
def _lowerCAmelCase ( self ):
"""simple docstring"""
import faiss
lowerCamelCase = self._create_dummy_dataset()
lowerCamelCase = dset.map(
lambda _a , _a : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_a , keep_in_memory=_a )
lowerCamelCase = dset.add_faiss_index("""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase , lowerCamelCase = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
dset.drop_index("""vecs""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
import faiss
lowerCamelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase , lowerCamelCase = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
import faiss
lowerCamelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_a ) as tmp_file:
dset.save_faiss_index("""vecs""" , tmp_file.name )
dset.load_faiss_index("""vecs2""" , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase , lowerCamelCase = dset.get_nearest_examples("""vecs2""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" )
dset.drop_index("""vecs""" )
self.assertRaises(_a , partial(dset.get_nearest_examples , """vecs2""" , np.ones(5 , dtype=np.floataa ) ) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
from elasticsearch import Elasticsearch
lowerCamelCase = self._create_dummy_dataset()
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
lowerCamelCase = {"""acknowledged""": True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCamelCase = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 29}]}}
lowerCamelCase = Elasticsearch()
dset.add_elasticsearch_index("""filename""" , es_client=_a )
lowerCamelCase , lowerCamelCase = dset.get_nearest_examples("""filename""" , """my_name-train_29""" )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
@require_faiss
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
import faiss
lowerCamelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase = np.zeros(5 , dtype=np.floataa )
lowerCamelCase = 1
lowerCamelCase , lowerCamelCase = index.search(_a )
self.assertRaises(_a , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase , lowerCamelCase = index.search_batch(_a )
self.assertRaises(_a , index.search_batch , queries[0] )
lowerCamelCase = [scores[0] for scores in total_scores]
lowerCamelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_a ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
import faiss
lowerCamelCase = FaissIndex(string_factory="""Flat""" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase = FaissIndex(string_factory="""LSH""" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(_a ):
lowerCamelCase = FaissIndex(string_factory="""Flat""" , custom_index=faiss.IndexFlat(5 ) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
import faiss
lowerCamelCase = faiss.IndexFlat(5 )
lowerCamelCase = FaissIndex(custom_index=_a )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _lowerCAmelCase ( self ):
"""simple docstring"""
import faiss
lowerCamelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_a ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase = np.zeros(5 , dtype=np.floataa )
lowerCamelCase = 1
lowerCamelCase , lowerCamelCase = index.search(_a )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def a__ ( snake_case__ ) -> Tuple:
import faiss
lowerCamelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase = """index.faiss"""
lowerCamelCase = F'mock://{index_name}'
index.save(snake_case__ , storage_options=mockfs.storage_options )
lowerCamelCase = FaissIndex.load(snake_case__ , storage_options=mockfs.storage_options )
lowerCamelCase = np.zeros(5 , dtype=np.floataa )
lowerCamelCase = 1
lowerCamelCase , lowerCamelCase = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
lowerCamelCase = Elasticsearch()
lowerCamelCase = {"""acknowledged""": True}
lowerCamelCase = ElasticSearchIndex(es_client=_a )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["""foo""", """bar""", """foobar"""] )
# single query
lowerCamelCase = """foo"""
lowerCamelCase = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
lowerCamelCase , lowerCamelCase = index.search(_a )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase = """foo"""
lowerCamelCase = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
lowerCamelCase , lowerCamelCase = index.search(_a , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase = ["""foo""", """bar""", """foobar"""]
lowerCamelCase = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
lowerCamelCase , lowerCamelCase = index.search_batch(_a )
lowerCamelCase = [scores[0] for scores in total_scores]
lowerCamelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_a ) , 0 )
self.assertListEqual([1, 1, 1] , _a )
# batched queries with timeout
lowerCamelCase = ["""foo""", """bar""", """foobar"""]
lowerCamelCase = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
lowerCamelCase , lowerCamelCase = index.search_batch(_a , request_timeout=30 )
lowerCamelCase = [scores[0] for scores in total_scores]
lowerCamelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_a ) , 0 )
self.assertListEqual([1, 1, 1] , _a )
| 704 |
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCAmelCase : List[str] = logging.get_logger(__name__)
class __magic_name__ :
'''simple docstring'''
def __init__( self , _a = None , _a = None , _a=None , _a=None ):
"""simple docstring"""
if not conversation_id:
lowerCamelCase = uuid.uuida()
if past_user_inputs is None:
lowerCamelCase = []
if generated_responses is None:
lowerCamelCase = []
lowerCamelCase = conversation_id
lowerCamelCase = past_user_inputs
lowerCamelCase = generated_responses
lowerCamelCase = text
def __eq__( self , _a ):
"""simple docstring"""
if not isinstance(_a , _a ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def _lowerCAmelCase ( self , _a , _a = False ):
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
f'with: "{text}".' )
lowerCamelCase = text
else:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
lowerCamelCase = text
def _lowerCAmelCase ( self ):
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
lowerCamelCase = None
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
self.generated_responses.append(_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
"""simple docstring"""
lowerCamelCase = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
lowerCamelCase = """user""" if is_user else """bot"""
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
UpperCAmelCase__ , R"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , *_a , **_a ):
"""simple docstring"""
super().__init__(*_a , **_a )
if self.tokenizer.pad_token_id is None:
lowerCamelCase = self.tokenizer.eos_token
def _lowerCAmelCase ( self , _a=None , _a=None , _a=None , **_a ):
"""simple docstring"""
lowerCamelCase = {}
lowerCamelCase = {}
lowerCamelCase = {}
if min_length_for_response is not None:
lowerCamelCase = min_length_for_response
if minimum_tokens is not None:
lowerCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
lowerCamelCase = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
lowerCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_a )
return preprocess_params, forward_params, postprocess_params
def __call__( self , _a , _a=0 , **_a ):
"""simple docstring"""
lowerCamelCase = super().__call__(_a , num_workers=_a , **_a )
if isinstance(_a , _a ) and len(_a ) == 1:
return outputs[0]
return outputs
def _lowerCAmelCase ( self , _a , _a=32 ):
"""simple docstring"""
if not isinstance(_a , _a ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
f'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
lowerCamelCase = self.tokenizer._build_conversation_input_ids(_a )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
lowerCamelCase = self._legacy_parse_and_tokenize(_a )
if self.framework == "pt":
lowerCamelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
lowerCamelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _lowerCAmelCase ( self , _a , _a=10 , **_a ):
"""simple docstring"""
lowerCamelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
lowerCamelCase = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
lowerCamelCase = max_length - minimum_tokens
lowerCamelCase = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
lowerCamelCase = model_inputs["""attention_mask"""][:, -trim:]
lowerCamelCase = model_inputs.pop("""conversation""" )
lowerCamelCase = max_length
lowerCamelCase = self.model.generate(**_a , **_a )
if self.model.config.is_encoder_decoder:
lowerCamelCase = 1
else:
lowerCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _lowerCAmelCase ( self , _a , _a=True ):
"""simple docstring"""
lowerCamelCase = model_outputs["""output_ids"""]
lowerCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , )
lowerCamelCase = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(_a )
return conversation
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = self.tokenizer.eos_token_id
lowerCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_a , add_special_tokens=_a ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_a , add_special_tokens=_a ) )
if len(_a ) > self.tokenizer.model_max_length:
lowerCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 533 | 0 |
import qiskit
def UpperCamelCase ( snake_case__ : int , snake_case__ : int ) -> qiskit.result.counts.Counts:
UpperCamelCase : Optional[int] = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
UpperCamelCase : int = qiskit.QuantumCircuit(snake_case__ , snake_case__ )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
UpperCamelCase : str = qiskit.execute(snake_case__ , snake_case__ , shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(snake_case__ )
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 40 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def UpperCamelCase ( snake_case__ : Dict ) -> Optional[int]:
return EnvironmentCommand()
class lowerCAmelCase_ ( a__ ):
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase : List[Any] = parser.add_parser('env' )
download_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = huggingface_hub.__version__
UpperCamelCase : int = 'not installed'
UpperCamelCase : Union[str, Any] = 'NA'
if is_torch_available():
import torch
UpperCamelCase : Any = torch.__version__
UpperCamelCase : str = torch.cuda.is_available()
UpperCamelCase : Dict = 'not installed'
if is_transformers_available():
import transformers
UpperCamelCase : str = transformers.__version__
UpperCamelCase : Optional[Any] = 'not installed'
if is_accelerate_available():
import accelerate
UpperCamelCase : Dict = accelerate.__version__
UpperCamelCase : List[str] = 'not installed'
if is_xformers_available():
import xformers
UpperCamelCase : List[str] = xformers.__version__
UpperCamelCase : Dict = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(SCREAMING_SNAKE_CASE_ ) )
return info
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 40 | 1 |
lowerCamelCase__ = range(2, 20 + 1)
lowerCamelCase__ = [10**k for k in range(ks[-1] + 1)]
lowerCamelCase__ = {}
def A(__a: Tuple , __a: Optional[Any] , __a: List[str] , __a: Optional[int] ):
lowerCAmelCase_ = sum(a_i[j] for j in range(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ) )
lowerCAmelCase_ = sum(a_i[j] * base[j] for j in range(min(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ) )
lowerCAmelCase_ , lowerCAmelCase_ = 0, 0
lowerCAmelCase_ = n - i
lowerCAmelCase_ = memo.get(_SCREAMING_SNAKE_CASE )
if sub_memo is not None:
lowerCAmelCase_ = sub_memo.get(_SCREAMING_SNAKE_CASE )
if jumps is not None and len(_SCREAMING_SNAKE_CASE ) > 0:
# find and make the largest jump without going over
lowerCAmelCase_ = -1
for _k in range(len(_SCREAMING_SNAKE_CASE ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowerCAmelCase_ = _k
break
if max_jump >= 0:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = jumps[max_jump]
# since the difference between jumps is cached, add c
lowerCAmelCase_ = diff + c
for j in range(min(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ) ):
lowerCAmelCase_ , lowerCAmelCase_ = divmod(_SCREAMING_SNAKE_CASE , 10 )
if new_c > 0:
add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase_ = []
else:
lowerCAmelCase_ = {c: []}
lowerCAmelCase_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowerCAmelCase_ , lowerCAmelCase_ = next_term(_SCREAMING_SNAKE_CASE , k - 1 , i + dn , _SCREAMING_SNAKE_CASE )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowerCAmelCase_ , lowerCAmelCase_ = compute(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , i + dn , _SCREAMING_SNAKE_CASE )
diff += _diff
dn += terms_jumped
lowerCAmelCase_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
lowerCAmelCase_ = 0
while j < len(_SCREAMING_SNAKE_CASE ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_SCREAMING_SNAKE_CASE , (diff, dn, k) )
return (diff, dn)
def A(__a: Dict , __a: str , __a: List[Any] , __a: Dict ):
if i >= n:
return 0, i
if k > len(_SCREAMING_SNAKE_CASE ):
a_i.extend([0 for _ in range(k - len(_SCREAMING_SNAKE_CASE ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
lowerCAmelCase_ = i
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0, 0, 0
for j in range(len(_SCREAMING_SNAKE_CASE ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
lowerCAmelCase_ = ds_c + ds_b
diff += addend
lowerCAmelCase_ = 0
for j in range(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase_ = a_i[j] + addend
lowerCAmelCase_ , lowerCAmelCase_ = divmod(_SCREAMING_SNAKE_CASE , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return diff, i - start_i
def A(__a: Union[str, Any] , __a: Optional[int] , __a: str ):
for j in range(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase_ = digits[j] + addend
if s >= 10:
lowerCAmelCase_ , lowerCAmelCase_ = divmod(_SCREAMING_SNAKE_CASE , 10 )
lowerCAmelCase_ = addend // 10 + quotient
else:
lowerCAmelCase_ = s
lowerCAmelCase_ = addend // 10
if addend == 0:
break
while addend > 0:
lowerCAmelCase_ , lowerCAmelCase_ = divmod(_SCREAMING_SNAKE_CASE , 10 )
digits.append(_SCREAMING_SNAKE_CASE )
def A(__a: Any = 10**15 ):
lowerCAmelCase_ = [1]
lowerCAmelCase_ = 1
lowerCAmelCase_ = 0
while True:
lowerCAmelCase_ , lowerCAmelCase_ = next_term(_SCREAMING_SNAKE_CASE , 20 , i + dn , _SCREAMING_SNAKE_CASE )
dn += terms_jumped
if dn == n - i:
break
lowerCAmelCase_ = 0
for j in range(len(_SCREAMING_SNAKE_CASE ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F'''{solution() = }''')
| 701 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __magic_name__ (__lowercase ):
lowerCamelCase__ = ''''''
lowerCamelCase__ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
lowerCamelCase__ = None # compression type in fsspec. ex: "gzip"
lowerCamelCase__ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self , _a = "" , _a = None , _a = None , **_a ) -> Any:
super().__init__(self , **_a )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCAmelCase_ = fsspec.open(
_a , mode="rb" , protocol=_a , compression=self.compression , client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCAmelCase_ = os.path.basename(self.file.path.split("::" )[0] )
lowerCAmelCase_ = (
self.compressed_name[: self.compressed_name.rindex("." )]
if "." in self.compressed_name
else self.compressed_name
)
lowerCAmelCase_ = None
@classmethod
def __a ( cls , _a ) -> List[Any]:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(_a ).lstrip("/" )
def __a ( self ) -> Union[str, Any]:
if self.dir_cache is None:
lowerCAmelCase_ = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
lowerCAmelCase_ = {f["name"]: f}
def __a ( self , _a ) -> Dict:
return self.file.open().read()
def __a ( self , _a , _a = "rb" , _a=None , _a=True , _a=None , **_a , ) -> Optional[Any]:
lowerCAmelCase_ = self._strip_protocol(_a )
if mode != "rb":
raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''bz2'''
lowerCamelCase__ = '''bz2'''
lowerCamelCase__ = '''.bz2'''
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''gzip'''
lowerCamelCase__ = '''gzip'''
lowerCamelCase__ = '''.gz'''
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''lz4'''
lowerCamelCase__ = '''lz4'''
lowerCamelCase__ = '''.lz4'''
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''xz'''
lowerCamelCase__ = '''xz'''
lowerCamelCase__ = '''.xz'''
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''zstd'''
lowerCamelCase__ = '''zstd'''
lowerCamelCase__ = '''.zst'''
def __init__( self , _a , _a = "rb" , _a = None , _a = None , _a = DEFAULT_BLOCK_SIZE , **_a , ) -> Tuple:
super().__init__(
fo=_a , mode=_a , target_protocol=_a , target_options=_a , block_size=_a , **_a , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCAmelCase_ = self.file.__enter__
class __magic_name__ :
def __init__( self , _a ) -> List[str]:
lowerCAmelCase_ = file_
def __enter__( self ) -> int:
self._file.__enter__()
return self
def __exit__( self , *_a , **_a ) -> Dict:
self._file.__exit__(*_a , **_a )
def __iter__( self ) -> List[Any]:
return iter(self._file )
def __a ( self ) -> List[Any]:
return next(self._file )
def __getattr__( self , _a ) -> Tuple:
return getattr(self._file , _a )
def fixed_enter(*_a , **_a ):
return WrappedFile(_enter(*_a , **_a ) )
lowerCAmelCase_ = fixed_enter
| 226 | 0 |
from math import isqrt
def __lowerCAmelCase ( A ):
UpperCAmelCase_ = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , lowercase_ , lowercase_ ):
UpperCAmelCase_ = False
return [i for i in range(2 , lowercase_ ) if is_prime[i]]
def __lowerCAmelCase ( A = 10**8 ):
UpperCAmelCase_ = calculate_prime_numbers(max_number // 2 )
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = len(lowercase_ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F'{solution() = }') | 162 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Any = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: List[str] = '''gpt_neo'''
UpperCAmelCase__: str = ['''past_key_values''']
UpperCAmelCase__: Dict = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , A__=5_0257 , A__=2048 , A__=2048 , A__=24 , A__=[[["global", "local"], 12]] , A__=16 , A__=None , A__=256 , A__="gelu_new" , A__=0.0 , A__=0.0 , A__=0.0 , A__=0.1 , A__=1e-5 , A__=0.0_2 , A__=True , A__=5_0256 , A__=5_0256 , **A__ , ):
A__ : Optional[Any] = vocab_size
A__ : Optional[int] = max_position_embeddings
A__ : Optional[int] = hidden_size
A__ : Dict = num_layers
A__ : List[str] = num_heads
A__ : Optional[int] = intermediate_size
A__ : Union[str, Any] = window_size
A__ : Optional[int] = activation_function
A__ : Optional[Any] = resid_dropout
A__ : Optional[int] = embed_dropout
A__ : Any = attention_dropout
A__ : Optional[Any] = classifier_dropout
A__ : int = layer_norm_epsilon
A__ : Tuple = initializer_range
A__ : str = use_cache
A__ : List[str] = bos_token_id
A__ : List[Any] = eos_token_id
A__ : Any = attention_types
A__ : List[str] = self.expand_attention_types_params(A__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
F"""`config.num_layers = {self.num_layers}`. """
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=A__ , eos_token_id=A__ , **A__ )
@staticmethod
def __A ( A__ ):
A__ : int = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def UpperCamelCase (lowercase_: Dict , lowercase_: Any , lowercase_: Union[str, Any] , lowercase_: Optional[Any] ) -> Dict:
import torch
A__ : List[Any] = input.size()
A__ : List[str] = len(lowercase_ )
A__ : Union[str, Any] = shape[dimension]
A__ : Optional[int] = torch.arange(0 , lowercase_ , lowercase_ )
A__ : str = torch.div(sizedim - size , lowercase_ , rounding_mode="""floor""" ) + 1
A__ : Any = torch.arange(lowercase_ ) + low_indices[:min_length][:, None]
A__ : Union[str, Any] = [slice(lowercase_ )] * rank
A__ : Any = indices
A__ : List[str] = input[s]
A__ : Dict = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(lowercase_ )
def UpperCamelCase (lowercase_: str , lowercase_: int ) -> int:
import torch
A__ : Union[str, Any] = torch.arange(1 , lowercase_ )
A__ : str = torch.remainder(lowercase_ , lowercase_ )
A__ : Optional[Any] = remainders == 0
A__ : Any = candidates[divisor_indices]
A__ : Optional[Any] = torch.max(lowercase_ )
return largest_divisor, torch.div(lowercase_ , lowercase_ , rounding_mode="""floor""" )
class _a (__magic_name__ ):
'''simple docstring'''
@property
def __A ( self ):
A__ : Any = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(A__ , direction="""inputs""" )
A__ : Optional[Any] = {0: """batch""", 1: """past_sequence + sequence"""}
else:
A__ : List[str] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def __A ( self ):
return self._config.num_heads
def __A ( self , A__ , A__ = -1 , A__ = -1 , A__ = False , A__ = None , ):
A__ : int = super(A__ , self ).generate_dummy_inputs(
A__ , batch_size=A__ , seq_length=A__ , is_pair=A__ , framework=A__ )
# We need to order the input in the way they appears in the forward()
A__ : Optional[int] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
A__ , A__ : Dict = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
A__ : int = seqlen + 2
A__ : int = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
A__ : Dict = [
(torch.zeros(A__ ), torch.zeros(A__ )) for _ in range(self.num_layers )
]
A__ : List[Any] = common_inputs["""attention_mask"""]
if self.use_past:
A__ : int = ordered_inputs["""attention_mask"""].dtype
A__ : str = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(A__ , A__ , dtype=A__ )] , dim=1 )
return ordered_inputs
@property
def __A ( self ):
return 13
| 456 | 0 |
from __future__ import annotations
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase, ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __lowerCAmelCase ( lowerCAmelCase):
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE ( _lowerCAmelCase: ArgumentParser ):
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE ( self: List[str] ):
raise NotImplementedError()
| 453 | 0 |
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def _UpperCamelCase (_lowerCamelCase : float )-> float:
'''simple docstring'''
if num <= 0:
raise ValueError('''math domain error''' )
return quad(_lowerCamelCase , 0 , _lowerCamelCase , args=(_lowerCamelCase) )[0]
def _UpperCamelCase (_lowerCamelCase : float , _lowerCamelCase : float )-> float:
'''simple docstring'''
return math.pow(_lowerCamelCase , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 24 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase : Optional[int] =16
_lowercase : List[str] =32
def lowerCAmelCase_ ( _lowercase : Accelerator , _lowercase : int = 16) -> Optional[int]:
"""simple docstring"""
a__ : Tuple = AutoTokenizer.from_pretrained("""bert-base-cased""")
a__ : Optional[int] = load_dataset("""glue""" , """mrpc""")
def tokenize_function(_lowercase : Optional[int]):
# max_length=None => use the model max length (it's actually the default)
a__ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_lowercase , max_length=_lowercase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
a__ : Optional[int] = datasets.map(
_lowercase , batched=_lowercase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
a__ : Any = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(_lowercase : int):
# On TPU it's best to pad everything to the same length or training will be very slow.
a__ : Optional[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
a__ : List[Any] = 16
elif accelerator.mixed_precision != "no":
a__ : List[Any] = 8
else:
a__ : Any = None
return tokenizer.pad(
_lowercase , padding="""longest""" , max_length=_lowercase , pad_to_multiple_of=_lowercase , return_tensors="""pt""" , )
# Instantiate dataloaders.
a__ : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase , drop_last=_lowercase)
a__ : Any = DataLoader(
tokenized_datasets["""validation"""] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase , drop_last=(accelerator.mixed_precision == """fp8""") , )
return train_dataloader, eval_dataloader
def lowerCAmelCase_ ( _lowercase : Optional[Any] , _lowercase : int) -> List[str]:
"""simple docstring"""
# Initialize accelerator
a__ : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a__ : List[Any] = config["""lr"""]
a__ : int = int(config["""num_epochs"""])
a__ : Tuple = int(config["""seed"""])
a__ : Dict = int(config["""batch_size"""])
a__ : Dict = evaluate.load("""glue""" , """mrpc""")
# If the batch size is too big we use gradient accumulation
a__ : Union[str, Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
a__ : List[Any] = batch_size // MAX_GPU_BATCH_SIZE
a__ : str = MAX_GPU_BATCH_SIZE
set_seed(_lowercase)
a__ , a__ : Any = get_dataloaders(_lowercase , _lowercase)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a__ : Tuple = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=_lowercase)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a__ : Dict = model.to(accelerator.device)
# Instantiate optimizer
a__ : Any = AdamW(params=model.parameters() , lr=_lowercase)
# Instantiate scheduler
a__ : List[str] = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=100 , num_training_steps=(len(_lowercase) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a__ , a__ , a__ , a__ , a__ : Tuple = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase)
# Now we train the model
for epoch in range(_lowercase):
model.train()
for step, batch in enumerate(_lowercase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
a__ : List[str] = model(**_lowercase)
a__ : List[str] = outputs.loss
a__ : str = loss / gradient_accumulation_steps
accelerator.backward(_lowercase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowercase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
a__ : Any = model(**_lowercase)
a__ : Optional[Any] = outputs.logits.argmax(dim=-1)
a__ , a__ : Dict = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
a__ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _lowercase)
def lowerCAmelCase_ ( ) -> Dict:
"""simple docstring"""
a__ : Union[str, Any] = argparse.ArgumentParser(description="""Simple example of training script.""")
parser.add_argument(
"""--mixed_precision""" , type=_lowercase , default=_lowercase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""")
a__ : Optional[int] = parser.parse_args()
a__ : Optional[Any] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(_lowercase , _lowercase)
if __name__ == "__main__":
main()
| 136 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 719 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : list , UpperCAmelCase_ : int ) -> Tuple:
# Checks if the entire collection has been sorted
if len(UpperCAmelCase_ ) <= 1 or n <= 1:
return
insert_next(UpperCAmelCase_ , n - 1 )
rec_insertion_sort(UpperCAmelCase_ , n - 1 )
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : list , UpperCAmelCase_ : int ) -> List[Any]:
# Checks order between adjacent elements
if index >= len(UpperCAmelCase_ ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str =(
collection[index],
collection[index - 1],
)
insert_next(UpperCAmelCase_ , index + 1 )
if __name__ == "__main__":
_lowercase = input("""Enter integers separated by spaces: """)
_lowercase = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 431 | 0 |
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowerCamelCase__( snake_case_ ):
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCAmelCase , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(__UpperCAmelCase , """num_attention_heads""" ) )
class lowerCamelCase__:
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=6_4 , __UpperCAmelCase=3 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=1_6 , __UpperCAmelCase=[1_2_8, 2_5_6, 3_8_4] , __UpperCAmelCase=[4, 6, 8] , __UpperCAmelCase=[2, 3, 4] , __UpperCAmelCase=[1_6, 1_6, 1_6] , __UpperCAmelCase=0 , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ):
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = kernel_size
__lowercase = stride
__lowercase = padding
__lowercase = hidden_sizes
__lowercase = num_attention_heads
__lowercase = depths
__lowercase = key_dim
__lowercase = drop_path_rate
__lowercase = patch_size
__lowercase = attention_ratio
__lowercase = mlp_ratio
__lowercase = initializer_range
__lowercase = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
__lowercase = is_training
__lowercase = use_labels
__lowercase = num_labels
__lowercase = initializer_range
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self ):
"""simple docstring"""
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = LevitModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowercase = model(__UpperCAmelCase )
__lowercase = (self.image_size, self.image_size)
__lowercase , __lowercase = image_size[0], image_size[1]
for _ in range(4 ):
__lowercase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
__lowercase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = LevitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowercase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__( snake_case_ , snake_case_ , unittest.TestCase ):
UpperCamelCase : str = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
UpperCamelCase : List[str] = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Tuple = False
UpperCamelCase : List[Any] = False
UpperCamelCase : Any = False
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = LevitModelTester(self )
__lowercase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=3_7 )
def __magic_name__ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self ):
"""simple docstring"""
return
@unittest.skip(reason="""Levit does not use inputs_embeds""" )
def __magic_name__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Levit does not support input and output embeddings""" )
def __magic_name__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Levit does not output attentions""" )
def __magic_name__ ( self ):
"""simple docstring"""
pass
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(__UpperCAmelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__lowercase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__lowercase = outputs.hidden_states
__lowercase = len(self.model_tester.depths ) + 1
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
__lowercase = (self.model_tester.image_size, self.model_tester.image_size)
__lowercase , __lowercase = image_size[0], image_size[1]
for _ in range(4 ):
__lowercase = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
__lowercase = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __magic_name__ ( self ):
"""simple docstring"""
pass
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
"""simple docstring"""
__lowercase = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__UpperCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
__lowercase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__lowercase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__lowercase = model(**__UpperCAmelCase ).loss
loss.backward()
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__lowercase = False
__lowercase = True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
__lowercase = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
__lowercase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__lowercase = model(**__UpperCAmelCase ).loss
loss.backward()
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__UpperCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
__lowercase = problem_type["""title"""]
__lowercase = problem_type["""num_labels"""]
__lowercase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__lowercase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if problem_type["num_labels"] > 1:
__lowercase = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
__lowercase = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__UpperCAmelCase ) as warning_list:
__lowercase = model(**__UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def __magic_name__ ( self ):
"""simple docstring"""
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = LevitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def lowercase__ ( ):
'''simple docstring'''
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase__( unittest.TestCase ):
@cached_property
def __magic_name__ ( self ):
"""simple docstring"""
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__UpperCAmelCase )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**__UpperCAmelCase )
# verify the logits
__lowercase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
__lowercase = torch.tensor([1.04_48, -0.37_45, -1.83_17] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 566 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def lowercase__ ( __UpperCamelCase : int ):
'''simple docstring'''
return EnvironmentCommand()
def lowercase__ ( __UpperCamelCase : Tuple ):
'''simple docstring'''
return EnvironmentCommand(args.accelerate_config_file )
class lowerCamelCase__( snake_case_ ):
@staticmethod
def __magic_name__ ( __UpperCAmelCase ):
"""simple docstring"""
__lowercase = parser.add_parser("""env""" )
download_parser.set_defaults(func=__UpperCAmelCase )
download_parser.add_argument(
"""--accelerate-config_file""" , default=__UpperCAmelCase , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=__UpperCAmelCase )
def __init__( self , __UpperCAmelCase , *__UpperCAmelCase ):
"""simple docstring"""
__lowercase = accelerate_config_file
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = """not installed"""
if is_safetensors_available():
import safetensors
__lowercase = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
__lowercase = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
__lowercase = """not installed"""
__lowercase = __lowercase = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
__lowercase = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(__UpperCAmelCase ):
__lowercase = load_config_from_file(self._accelerate_config_file ).to_dict()
__lowercase = (
"""\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(__UpperCAmelCase , __UpperCAmelCase )
else F'''\t{accelerate_config}'''
)
__lowercase = """not installed"""
__lowercase = """NA"""
if is_torch_available():
import torch
__lowercase = torch.__version__
__lowercase = torch.cuda.is_available()
__lowercase = """not installed"""
__lowercase = """NA"""
if is_tf_available():
import tensorflow as tf
__lowercase = tf.__version__
try:
# deprecated in v2.1
__lowercase = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
__lowercase = bool(tf.config.list_physical_devices("""GPU""" ) )
__lowercase = """not installed"""
__lowercase = """not installed"""
__lowercase = """not installed"""
__lowercase = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
__lowercase = flax.__version__
__lowercase = jax.__version__
__lowercase = jaxlib.__version__
__lowercase = jax.lib.xla_bridge.get_backend().platform
__lowercase = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": F'''{safetensors_version}''',
"""Accelerate version""": F'''{accelerate_version}''',
"""Accelerate config""": F'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": F'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": F'''{flax_version} ({jax_backend})''',
"""Jax version""": F'''{jax_version}''',
"""JaxLib version""": F'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(__UpperCAmelCase ) )
return info
@staticmethod
def __magic_name__ ( __UpperCAmelCase ):
"""simple docstring"""
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 566 | 1 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class lowerCAmelCase__ ( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
__A : Optional[Any] = [('size', ctypes.c_int), ('visible', ctypes.c_byte)]
def snake_case__ ( ) -> List[Any]:
"""simple docstring"""
if os.name == "nt":
A__ : Optional[Any] = CursorInfo()
A__ : Tuple = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__lowercase , ctypes.byref(__lowercase ) )
A__ : Any = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(__lowercase , ctypes.byref(__lowercase ) )
elif os.name == "posix":
sys.stdout.write("\033[?25l" )
sys.stdout.flush()
def snake_case__ ( ) -> Dict:
"""simple docstring"""
if os.name == "nt":
A__ : List[str] = CursorInfo()
A__ : Optional[int] = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__lowercase , ctypes.byref(__lowercase ) )
A__ : int = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(__lowercase , ctypes.byref(__lowercase ) )
elif os.name == "posix":
sys.stdout.write("\033[?25h" )
sys.stdout.flush()
@contextmanager
def snake_case__ ( ) -> Optional[int]:
"""simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor() | 182 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class lowerCAmelCase__ ( unittest.TestCase , UpperCamelCase ):
def _lowercase ( self : List[Any]):
A__ : Optional[Any] = load_tool("text-classification")
self.tool.setup()
A__ : Any = load_tool("text-classification" , remote=_A)
def _lowercase ( self : List[Any]):
A__ : Optional[Any] = self.tool("That's quite cool" , ["positive", "negative"])
self.assertEqual(_A , "positive")
def _lowercase ( self : List[str]):
A__ : Any = self.remote_tool("That's quite cool" , ["positive", "negative"])
self.assertEqual(_A , "positive")
def _lowercase ( self : Optional[Any]):
A__ : List[str] = self.tool(text="That's quite cool" , labels=["positive", "negative"])
self.assertEqual(_A , "positive")
def _lowercase ( self : Tuple):
A__ : Union[str, Any] = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"])
self.assertEqual(_A , "positive") | 182 | 1 |
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : int ) -> int:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise ValueError('''multiplicative_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''multiplicative_persistence() does not accept negative values''' )
SCREAMING_SNAKE_CASE_ : str =0
SCREAMING_SNAKE_CASE_ : Optional[int] =str(UpperCAmelCase_ )
while len(UpperCAmelCase_ ) != 1:
SCREAMING_SNAKE_CASE_ : Optional[int] =[int(UpperCAmelCase_ ) for i in num_string]
SCREAMING_SNAKE_CASE_ : Tuple =1
for i in range(0 , len(UpperCAmelCase_ ) ):
total *= numbers[i]
SCREAMING_SNAKE_CASE_ : Union[str, Any] =str(UpperCAmelCase_ )
steps += 1
return steps
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : int ) -> int:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise ValueError('''additive_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''additive_persistence() does not accept negative values''' )
SCREAMING_SNAKE_CASE_ : Any =0
SCREAMING_SNAKE_CASE_ : List[Any] =str(UpperCAmelCase_ )
while len(UpperCAmelCase_ ) != 1:
SCREAMING_SNAKE_CASE_ : List[Any] =[int(UpperCAmelCase_ ) for i in num_string]
SCREAMING_SNAKE_CASE_ : Union[str, Any] =0
for i in range(0 , len(UpperCAmelCase_ ) ):
total += numbers[i]
SCREAMING_SNAKE_CASE_ : List[Any] =str(UpperCAmelCase_ )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 443 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : int = 8 ) -> str:
SCREAMING_SNAKE_CASE_ : str =ascii_letters + digits + punctuation
return "".join(secrets.choice(UpperCAmelCase_ ) for _ in range(UpperCAmelCase_ ) )
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : int ) -> str:
# Password Generator = full boot with random_number, random_letters, and
# random_character FUNCTIONS
# Put your code here...
i -= len(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Any =i // 3
SCREAMING_SNAKE_CASE_ : Optional[Any] =i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
SCREAMING_SNAKE_CASE_ : int =(
chars_incl
+ random(UpperCAmelCase_ , quotient + remainder )
+ random(UpperCAmelCase_ , UpperCAmelCase_ )
+ random(UpperCAmelCase_ , UpperCAmelCase_ )
)
SCREAMING_SNAKE_CASE_ : Optional[int] =list(UpperCAmelCase_ )
shuffle(UpperCAmelCase_ )
return "".join(UpperCAmelCase_ )
# random is a generalised function for letters, characters and numbers
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : int ) -> str:
return "".join(secrets.choice(UpperCAmelCase_ ) for _ in range(UpperCAmelCase_ ) )
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple ) -> Optional[int]:
pass # Put your code here...
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple ) -> Dict:
pass # Put your code here...
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] ) -> Optional[int]:
pass # Put your code here...
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : int = 8 ) -> bool:
if len(UpperCAmelCase_ ) < min_length:
# Your Password must be at least 8 characters long
return False
SCREAMING_SNAKE_CASE_ : List[str] =any(char in ascii_uppercase for char in password )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =any(char in ascii_lowercase for char in password )
SCREAMING_SNAKE_CASE_ : List[Any] =any(char in digits for char in password )
SCREAMING_SNAKE_CASE_ : str =any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ : Any =int(input('''Please indicate the max length of your password: ''' ).strip() )
SCREAMING_SNAKE_CASE_ : Any =input(
'''Please indicate the characters that must be in your password: ''' ).strip()
print('''Password generated:''' , password_generator(UpperCAmelCase_ ) )
print(
'''Alternative Password generated:''' , alternative_password_generator(UpperCAmelCase_ , UpperCAmelCase_ ) , )
print('''[If you are thinking of using this passsword, You better save it.]''' )
if __name__ == "__main__":
main()
| 443 | 1 |
from __future__ import annotations
def a_ ( __snake_case , __snake_case ) -> list[int]:
'''simple docstring'''
UpperCamelCase_ = 0
UpperCamelCase_ = len(__snake_case ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
UpperCamelCase_ = i + 1
else:
UpperCamelCase_ = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 559 |
def a_ ( __snake_case ) -> list[int]:
'''simple docstring'''
UpperCamelCase_ = [0 for i in range(len(__snake_case ) )]
# initialize interval's left pointer and right pointer
UpperCamelCase_ , UpperCamelCase_ = 0, 0
for i in range(1 , len(__snake_case ) ):
# case when current index is inside the interval
if i <= right_pointer:
UpperCamelCase_ = min(right_pointer - i + 1 , z_result[i - left_pointer] )
UpperCamelCase_ = min_edge
while go_next(__snake_case , __snake_case , __snake_case ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
UpperCamelCase_ , UpperCamelCase_ = i, i + z_result[i] - 1
return z_result
def a_ ( __snake_case , __snake_case , __snake_case ) -> bool:
'''simple docstring'''
return i + z_result[i] < len(__snake_case ) and s[z_result[i]] == s[i + z_result[i]]
def a_ ( __snake_case , __snake_case ) -> int:
'''simple docstring'''
UpperCamelCase_ = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
UpperCamelCase_ = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(__snake_case ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
| 559 | 1 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_A: int = get_logger()
_A: Optional[dict] = None
class UpperCAmelCase ( TensorFormatter[Mapping, """jax.Array""", Mapping] ):
def __init__( self , __A=None , __A=None , **__A ):
super().__init__(features=__A )
import jax
from jaxlib.xla_client import Device
if isinstance(__A , __A ):
raise ValueError(
f'Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` '
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
__UpperCAmelCase = device if isinstance(__A , __A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
__UpperCAmelCase = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'Device with string identifier {self.device} not listed among the available '
f'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '
f'device: {str(jax.devices()[0] )}.' )
__UpperCAmelCase = str(jax.devices()[0] )
__UpperCAmelCase = jnp_array_kwargs
@staticmethod
def __lowerCamelCase ( ):
import jax
return {str(__A ): device for device in jax.devices()}
def __lowerCamelCase ( self , __A ):
import jax
import jax.numpy as jnp
if isinstance(__A , __A ) and column:
if all(
isinstance(__A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__A , axis=0 )
return column
def __lowerCamelCase ( self , __A ):
import jax
import jax.numpy as jnp
if isinstance(__A , (str, bytes, type(__A )) ):
return value
elif isinstance(__A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
__UpperCAmelCase = {}
if isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
__UpperCAmelCase = {'dtype': jnp.intaa}
else:
__UpperCAmelCase = {'dtype': jnp.intaa}
elif isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
__UpperCAmelCase = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__A , PIL.Image.Image ):
__UpperCAmelCase = np.asarray(__A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
__UpperCAmelCase = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__A , **{**default_dtype, **self.jnp_array_kwargs} )
def __lowerCamelCase ( self , __A ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__A , '__array__' ) and not isinstance(__A , jax.Array ):
__UpperCAmelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__A , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
elif isinstance(__A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
return self._tensorize(__A )
def __lowerCamelCase ( self , __A ):
return map_nested(self._recursive_tensorize , __A , map_list=__A )
def __lowerCamelCase ( self , __A ):
__UpperCAmelCase = self.numpy_arrow_extractor().extract_row(__A )
__UpperCAmelCase = self.python_features_decoder.decode_row(__A )
return self.recursive_tensorize(__A )
def __lowerCamelCase ( self , __A ):
__UpperCAmelCase = self.numpy_arrow_extractor().extract_column(__A )
__UpperCAmelCase = self.python_features_decoder.decode_column(__A , pa_table.column_names[0] )
__UpperCAmelCase = self.recursive_tensorize(__A )
__UpperCAmelCase = self._consolidate(__A )
return column
def __lowerCamelCase ( self , __A ):
__UpperCAmelCase = self.numpy_arrow_extractor().extract_batch(__A )
__UpperCAmelCase = self.python_features_decoder.decode_batch(__A )
__UpperCAmelCase = self.recursive_tensorize(__A )
for column_name in batch:
__UpperCAmelCase = self._consolidate(batch[column_name] )
return batch
| 126 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
_A: Optional[int] = True
except (ImportError, ModuleNotFoundError):
_A: Dict = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def _lowerCAmelCase ( _lowerCAmelCase )-> str:
re.sub('<n>' , '' , _lowerCAmelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_lowerCAmelCase ) )
| 126 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = {
'''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
snake_case__ : str = 'poolformer'
def __init__( self :Dict , __magic_name__ :Any=3 , __magic_name__ :str=16 , __magic_name__ :str=16 , __magic_name__ :str=3 , __magic_name__ :Optional[int]=4.0 , __magic_name__ :Optional[Any]=[2, 2, 6, 2] , __magic_name__ :Any=[64, 128, 320, 512] , __magic_name__ :str=[7, 3, 3, 3] , __magic_name__ :List[str]=[4, 2, 2, 2] , __magic_name__ :Tuple=[2, 1, 1, 1] , __magic_name__ :Optional[Any]=4 , __magic_name__ :Optional[Any]=0.0 , __magic_name__ :Dict="gelu" , __magic_name__ :int=True , __magic_name__ :str=1e-5 , __magic_name__ :Any=0.02 , **__magic_name__ :Union[str, Any] , ) -> Optional[Any]:
'''simple docstring'''
a__ = num_channels
a__ = patch_size
a__ = stride
a__ = padding
a__ = pool_size
a__ = hidden_sizes
a__ = mlp_ratio
a__ = depths
a__ = patch_sizes
a__ = strides
a__ = num_encoder_blocks
a__ = drop_path_rate
a__ = hidden_act
a__ = use_layer_scale
a__ = layer_scale_init_value
a__ = initializer_range
super().__init__(**__magic_name__ )
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
snake_case__ : int = version.parse('1.11' )
@property
def _UpperCamelCase ( self :Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _UpperCamelCase ( self :str ) -> float:
'''simple docstring'''
return 2e-3
| 708 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self :Optional[int] , __magic_name__ :Dict , __magic_name__ :List[str]=13 , __magic_name__ :Tuple=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :str=True , __magic_name__ :Optional[Any]=True , __magic_name__ :int=True , __magic_name__ :Optional[Any]=99 , __magic_name__ :Optional[int]=32 , __magic_name__ :str=5 , __magic_name__ :List[Any]=4 , __magic_name__ :str=37 , __magic_name__ :List[str]="gelu" , __magic_name__ :str=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :Any=512 , __magic_name__ :int=16 , __magic_name__ :Tuple=2 , __magic_name__ :Optional[Any]=0.02 , __magic_name__ :List[str]=False , __magic_name__ :List[Any]=True , __magic_name__ :List[Any]="None" , __magic_name__ :str=3 , __magic_name__ :Optional[int]=4 , __magic_name__ :Dict=None , ) -> Union[str, Any]:
'''simple docstring'''
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_input_mask
a__ = use_token_type_ids
a__ = use_labels
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = relative_attention
a__ = position_biased_input
a__ = pos_att_type
a__ = scope
def _UpperCamelCase ( self :Optional[int] ) -> Any:
'''simple docstring'''
a__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a__ = ids_tensor([self.batch_size] , self.num_choices )
a__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _UpperCamelCase ( self :int ) -> str:
'''simple docstring'''
a__ = self.get_config()
a__ = 300
return config
def _UpperCamelCase ( self :str , __magic_name__ :str ) -> List[Any]:
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _UpperCamelCase ( self :Optional[int] , __magic_name__ :Union[str, Any] , __magic_name__ :Optional[Any] , __magic_name__ :Optional[int] , __magic_name__ :Union[str, Any] , __magic_name__ :Dict , __magic_name__ :Tuple , __magic_name__ :Optional[Any] ) -> Tuple:
'''simple docstring'''
a__ = DebertaModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a__ = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )[0]
a__ = model(__magic_name__ , token_type_ids=__magic_name__ )[0]
a__ = model(__magic_name__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _UpperCamelCase ( self :Optional[Any] , __magic_name__ :Tuple , __magic_name__ :Any , __magic_name__ :int , __magic_name__ :List[Any] , __magic_name__ :List[Any] , __magic_name__ :int , __magic_name__ :Union[str, Any] ) -> str:
'''simple docstring'''
a__ = DebertaForMaskedLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a__ = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self :Any , __magic_name__ :Optional[int] , __magic_name__ :str , __magic_name__ :str , __magic_name__ :List[Any] , __magic_name__ :Union[str, Any] , __magic_name__ :Optional[Any] , __magic_name__ :Tuple ) -> str:
'''simple docstring'''
a__ = self.num_labels
a__ = DebertaForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
a__ = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__magic_name__ )
def _UpperCamelCase ( self :int , __magic_name__ :Tuple , __magic_name__ :Dict , __magic_name__ :Any , __magic_name__ :Optional[int] , __magic_name__ :Union[str, Any] , __magic_name__ :Optional[int] , __magic_name__ :Optional[Any] ) -> List[str]:
'''simple docstring'''
a__ = self.num_labels
a__ = DebertaForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a__ = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self :str , __magic_name__ :Dict , __magic_name__ :Any , __magic_name__ :Tuple , __magic_name__ :int , __magic_name__ :Union[str, Any] , __magic_name__ :str , __magic_name__ :Tuple ) -> Tuple:
'''simple docstring'''
a__ = DebertaForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a__ = model(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
a__ = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) = config_and_inputs
a__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
snake_case__ : int = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[Any] = (
{
'feature-extraction': DebertaModel,
'fill-mask': DebertaForMaskedLM,
'question-answering': DebertaForQuestionAnswering,
'text-classification': DebertaForSequenceClassification,
'token-classification': DebertaForTokenClassification,
'zero-shot': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : int = True
snake_case__ : Any = False
snake_case__ : List[str] = False
snake_case__ : Any = False
snake_case__ : Dict = False
def _UpperCamelCase ( self :str ) -> Union[str, Any]:
'''simple docstring'''
a__ = DebertaModelTester(self )
a__ = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def _UpperCamelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__magic_name__ )
def _UpperCamelCase ( self :Any ) -> Any:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__magic_name__ )
def _UpperCamelCase ( self :Optional[int] ) -> Any:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__magic_name__ )
def _UpperCamelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__magic_name__ )
def _UpperCamelCase ( self :str ) -> int:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__magic_name__ )
@slow
def _UpperCamelCase ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = DebertaModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='''Model not available yet''' )
def _UpperCamelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
pass
@slow
def _UpperCamelCase ( self :Tuple ) -> str:
'''simple docstring'''
a__ = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
a__ = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
a__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
a__ = model(__magic_name__ , attention_mask=__magic_name__ )[0]
# compare the actual values for a slice.
a__ = torch.tensor(
[[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __magic_name__ , atol=1e-4 ) , F"{output[:, 1:4, 1:4]}" )
| 158 | 0 |
'''simple docstring'''
import os
def __snake_case ():
"""simple docstring"""
lowerCamelCase_ : Dict = os.path.dirname(os.path.realpath(__UpperCAmelCase ) )
lowerCamelCase_ : Optional[Any] = os.path.join(__UpperCAmelCase , '''triangle.txt''' )
with open(__UpperCAmelCase ) as f:
lowerCamelCase_ : Dict = f.readlines()
lowerCamelCase_ : str = []
for line in triangle:
lowerCamelCase_ : Optional[Any] = []
for number in line.strip().split(''' ''' ):
numbers_from_line.append(int(__UpperCAmelCase ) )
a.append(__UpperCAmelCase )
for i in range(1 , len(__UpperCAmelCase ) ):
for j in range(len(a[i] ) ):
lowerCamelCase_ : Any = a[i - 1][j] if j != len(a[i - 1] ) else 0
lowerCamelCase_ : Dict = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(__UpperCAmelCase , __UpperCAmelCase )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
| 501 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowerCamelCase : str = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = test_results.split(''' ''' )
lowerCamelCase_ : Tuple = 0
lowerCamelCase_ : Optional[int] = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
lowerCamelCase_ : Optional[Any] = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(__UpperCAmelCase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : Tuple = {}
lowerCamelCase_ : List[Any] = None
lowerCamelCase_ : Any = False
for line in failures_short_lines.split('''\n''' ):
if re.search(R'''_ \[doctest\]''' , __UpperCAmelCase ):
lowerCamelCase_ : Union[str, Any] = True
lowerCamelCase_ : Optional[Any] = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
lowerCamelCase_ : str = line
lowerCamelCase_ : List[Any] = False
return failures
class lowerCAmelCase__ :
def __init__( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : List[Any] = title
lowerCamelCase_ : Dict = doc_test_results['''time_spent'''].split(''',''' )[0]
lowerCamelCase_ : str = doc_test_results['''success''']
lowerCamelCase_ : int = doc_test_results['''failures''']
lowerCamelCase_ : List[Any] = self.n_success + self.n_failures
# Failures and success of the modeling tests
lowerCamelCase_ : Union[str, Any] = doc_test_results
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ : Dict = [self._time_spent]
lowerCamelCase_ : Optional[int] = 0
for time in time_spent:
lowerCamelCase_ : Tuple = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(UpperCamelCase_ ) == 1:
lowerCamelCase_ : int = [0, 0, time_parts[0]]
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Dict = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3_600 + minutes * 60 + seconds
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[Any] = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
return F"""{int(UpperCamelCase_ )}h{int(UpperCamelCase_ )}m{int(UpperCamelCase_ )}s"""
@property
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
F""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def __UpperCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ : Any = 40
lowerCamelCase_ : Tuple = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(UpperCamelCase_ , UpperCamelCase_ )}
lowerCamelCase_ : int = ''''''
for category, failures in category_failures.items():
if len(UpperCamelCase_ ) == 0:
continue
if report != "":
report += "\n\n"
report += F"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(UpperCamelCase_ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ : str = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(UpperCamelCase_ )
@staticmethod
def __UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ : Any = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(UpperCamelCase_ )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=UpperCamelCase_ , )
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
lowerCamelCase_ : Optional[int] = F"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else '''All tests passed.'''
lowerCamelCase_ : Union[str, Any] = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=UpperCamelCase_ , )
def __UpperCamelCase ( self : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ : str = ''''''
for key, value in failures.items():
lowerCamelCase_ : List[Any] = value[:200] + ''' [Truncated]''' if len(UpperCamelCase_ ) > 250 else value
failures_text += F"""*{key}*\n_{value}_\n\n"""
lowerCamelCase_ : List[str] = job_name
lowerCamelCase_ : List[str] = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
lowerCamelCase_ : List[str] = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def __UpperCamelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
lowerCamelCase_ : Dict = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
lowerCamelCase_ : List[Any] = sorted(self.doc_test_results.items() , key=lambda UpperCamelCase_ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
lowerCamelCase_ : str = F"""*Num failures* :{len(job_result['failed'] )} \n"""
lowerCamelCase_ : Any = job_result['''failures''']
lowerCamelCase_ : Any = self.get_reply_blocks(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , text=UpperCamelCase_ )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=F"""Results for {job}""" , blocks=UpperCamelCase_ , thread_ts=self.thread_ts['''ts'''] , )
time.sleep(1 )
def __snake_case ():
"""simple docstring"""
lowerCamelCase_ : Any = os.environ['''GITHUB_RUN_ID''']
lowerCamelCase_ : Dict = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
lowerCamelCase_ : Tuple = requests.get(__UpperCAmelCase ).json()
lowerCamelCase_ : Union[str, Any] = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
lowerCamelCase_ : Union[str, Any] = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(__UpperCAmelCase ):
lowerCamelCase_ : Optional[Any] = requests.get(url + F"""&page={i + 2}""" ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , __UpperCAmelCase )
return {}
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : List[Any] = {}
if os.path.exists(__UpperCAmelCase ):
lowerCamelCase_ : Optional[Any] = os.listdir(__UpperCAmelCase )
for file in files:
try:
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , encoding='''utf-8''' ) as f:
lowerCamelCase_ : str = f.read()
except UnicodeDecodeError as e:
raise ValueError(F"""Could not open {os.path.join(__UpperCAmelCase , __UpperCAmelCase )}.""" ) from e
return _artifact
def __snake_case ():
"""simple docstring"""
class lowerCAmelCase__ :
def __init__( self : Any , UpperCamelCase_ : str ) -> int:
"""simple docstring"""
lowerCamelCase_ : Optional[int] = name
lowerCamelCase_ : str = []
def __str__( self : int ) -> Tuple:
"""simple docstring"""
return self.name
def __UpperCamelCase ( self : List[str] , UpperCamelCase_ : str ) -> List[str]:
"""simple docstring"""
self.paths.append({'''name''': self.name, '''path''': path} )
lowerCamelCase_ : Dict[str, Artifact] = {}
lowerCamelCase_ : List[str] = filter(os.path.isdir , os.listdir() )
for directory in directories:
lowerCamelCase_ : List[Any] = directory
if artifact_name not in _available_artifacts:
lowerCamelCase_ : List[Any] = Artifact(__UpperCAmelCase )
_available_artifacts[artifact_name].add_path(__UpperCAmelCase )
return _available_artifacts
if __name__ == "__main__":
__lowerCamelCase : List[Any] = get_job_links()
__lowerCamelCase : str = retrieve_available_artifacts()
__lowerCamelCase : Tuple = collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowerCamelCase : Optional[Any] = {
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowerCamelCase : str = github_actions_job_links.get("""run_doctests""")
__lowerCamelCase : Optional[int] = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
__lowerCamelCase : Tuple = retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = handle_test_results(artifact["""stats"""])
__lowerCamelCase : Union[str, Any] = failed
__lowerCamelCase : str = success
__lowerCamelCase : int = time_spent[1:-1] + """, """
__lowerCamelCase : List[Any] = extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
__lowerCamelCase : List[Any] = line.replace("""FAILED """, """""")
__lowerCamelCase : List[str] = line.split()[0].replace("""\n""", """""")
if "::" in line:
__lowerCamelCase , __lowerCamelCase : Optional[Any] = line.split("""::""")
else:
__lowerCamelCase , __lowerCamelCase : str = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowerCamelCase : Tuple = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowerCamelCase : List[Any] = all_failures[test] if test in all_failures else """N/A"""
__lowerCamelCase : Optional[int] = failure
break
__lowerCamelCase : Dict = Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
| 501 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
UpperCamelCase_ = logging.getLogger(__name__)
UpperCamelCase_ = tf.data.AUTOTUNE
def _lowerCAmelCase ( ) -> Any:
lowercase : Dict =argparse.ArgumentParser(description='''Train a masked language model on TPU.''' )
parser.add_argument(
'''--pretrained_model_config''' , type=__magic_name__ , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , )
parser.add_argument(
'''--tokenizer''' , type=__magic_name__ , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , )
parser.add_argument(
'''--per_replica_batch_size''' , type=__magic_name__ , default=8 , help='''Batch size per TPU core.''' , )
parser.add_argument(
'''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , )
parser.add_argument(
'''--tpu_name''' , type=__magic_name__ , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , )
parser.add_argument(
'''--tpu_zone''' , type=__magic_name__ , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , )
parser.add_argument(
'''--gcp_project''' , type=__magic_name__ , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' )
parser.add_argument(
'''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , )
parser.add_argument(
'''--train_dataset''' , type=__magic_name__ , help='''Path to training dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--shuffle_buffer_size''' , type=__magic_name__ , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , )
parser.add_argument(
'''--eval_dataset''' , type=__magic_name__ , help='''Path to evaluation dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--num_epochs''' , type=__magic_name__ , default=1 , help='''Number of epochs to train for.''' , )
parser.add_argument(
'''--learning_rate''' , type=__magic_name__ , default=1E-4 , help='''Learning rate to use for training.''' , )
parser.add_argument(
'''--weight_decay_rate''' , type=__magic_name__ , default=1E-3 , help='''Weight decay rate to use for training.''' , )
parser.add_argument(
'''--max_length''' , type=__magic_name__ , default=512 , help='''Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py''' , )
parser.add_argument(
'''--mlm_probability''' , type=__magic_name__ , default=0.1_5 , help='''Fraction of tokens to mask during training.''' , )
parser.add_argument('''--output_dir''' , type=__magic_name__ , required=__magic_name__ , help='''Path to save model checkpoints to.''' )
parser.add_argument('''--hub_model_id''' , type=__magic_name__ , help='''Model ID to upload to on the Hugging Face Hub.''' )
lowercase : Union[str, Any] =parser.parse_args()
return args
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> List[Any]:
try:
if args.tpu_name:
lowercase : Dict =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
lowercase : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
'''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
tf.config.experimental_connect_to_cluster(__magic_name__ )
tf.tpu.experimental.initialize_tpu_system(__magic_name__ )
return tpu
def _lowerCAmelCase ( __magic_name__ : Tuple ) -> Union[str, Any]:
lowercase : str =0
for file in file_list:
lowercase : List[str] =file.split('''/''' )[-1]
lowercase : Union[str, Any] =re.search(R'''-\d+-(\d+)\.tfrecord''' , __magic_name__ ).group(1 )
lowercase : int =int(__magic_name__ )
num_samples += sample_count
return num_samples
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int]=None ) -> str:
lowercase : int =count_samples(__magic_name__ )
lowercase : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__magic_name__ )
if shuffle:
lowercase : Union[str, Any] =dataset.shuffle(len(__magic_name__ ) )
lowercase : Any =tf.data.TFRecordDataset(__magic_name__ , num_parallel_reads=__magic_name__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
lowercase : Optional[int] =dataset.apply(tf.data.experimental.assert_cardinality(__magic_name__ ) )
lowercase : str =dataset.map(__magic_name__ , num_parallel_calls=__magic_name__ )
if shuffle:
assert shuffle_buffer_size is not None
lowercase : int =dataset.shuffle(args.shuffle_buffer_size )
lowercase : Optional[int] =dataset.batch(__magic_name__ , drop_remainder=__magic_name__ )
lowercase : int =dataset.map(__magic_name__ , num_parallel_calls=__magic_name__ )
lowercase : Union[str, Any] =dataset.prefetch(__magic_name__ )
return dataset
def _lowerCAmelCase ( __magic_name__ : Any ) -> str:
if not args.no_tpu:
lowercase : Optional[Any] =initialize_tpu(__magic_name__ )
lowercase : Any =tf.distribute.TPUStrategy(__magic_name__ )
else:
lowercase : Optional[Any] =tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
lowercase : Any =AutoTokenizer.from_pretrained(args.tokenizer )
lowercase : Union[str, Any] =AutoConfig.from_pretrained(args.pretrained_model_config )
lowercase : Optional[Any] =tokenizer.vocab_size
lowercase : str =tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
if not training_records:
raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' )
lowercase : Optional[int] =tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
if not eval_records:
raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' )
lowercase : Any =count_samples(__magic_name__ )
lowercase : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
lowercase : Union[str, Any] =steps_per_epoch * args.num_epochs
with strategy.scope():
lowercase : List[Any] =TFAutoModelForMaskedLM.from_config(__magic_name__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
lowercase , lowercase : Dict =create_optimizer(
num_train_steps=__magic_name__ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__magic_name__ , metrics=['''accuracy'''] )
def decode_fn(__magic_name__ : Optional[Any] ):
lowercase : Union[str, Any] ={
'''input_ids''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
'''attention_mask''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__magic_name__ , __magic_name__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
lowercase : str =DataCollatorForLanguageModeling(
tokenizer=__magic_name__ , mlm_probability=args.mlm_probability , mlm=__magic_name__ , return_tensors='''tf''' )
def mask_with_collator(__magic_name__ : Dict ):
# TF really needs an isin() function
lowercase : int =(
~tf.cast(batch['''attention_mask'''] , tf.bool )
| (batch['''input_ids'''] == tokenizer.cls_token_id)
| (batch['''input_ids'''] == tokenizer.sep_token_id)
)
lowercase , lowercase : Union[str, Any] =data_collator.tf_mask_tokens(
batch['''input_ids'''] , vocab_size=len(__magic_name__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=__magic_name__ , )
return batch
lowercase : List[str] =args.per_replica_batch_size * strategy.num_replicas_in_sync
lowercase : Dict =prepare_dataset(
__magic_name__ , decode_fn=__magic_name__ , mask_fn=__magic_name__ , batch_size=__magic_name__ , shuffle=__magic_name__ , shuffle_buffer_size=args.shuffle_buffer_size , )
lowercase : Union[str, Any] =prepare_dataset(
__magic_name__ , decode_fn=__magic_name__ , mask_fn=__magic_name__ , batch_size=__magic_name__ , shuffle=__magic_name__ , )
lowercase : Tuple =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=__magic_name__ ) )
model.fit(
__magic_name__ , validation_data=__magic_name__ , epochs=args.num_epochs , callbacks=__magic_name__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
UpperCamelCase_ = parse_args()
main(args)
| 88 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 88 | 1 |
'''simple docstring'''
def _lowercase ( UpperCamelCase__ : Optional[Any] ):
__A : Dict = 1
__A : Dict = 2
while i * i <= n:
__A : Any = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def _lowercase ( ):
__A : List[Any] = 1
__A : Optional[Any] = 1
while True:
i += 1
t_num += i
if count_divisors(UpperCamelCase__ ) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
| 365 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class _lowerCamelCase ( snake_case_ ):
'''simple docstring'''
__lowercase : torch.FloatTensor
class _lowerCamelCase ( snake_case_ , snake_case_ ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowercase = 16 , __lowercase = 88 , __lowercase = None , __lowercase = None , __lowercase = 1 , __lowercase = 0.0 , __lowercase = 32 , __lowercase = None , __lowercase = False , __lowercase = None , __lowercase = "geglu" , __lowercase = True , __lowercase = True , ):
"""simple docstring"""
super().__init__()
__A : Optional[int] = num_attention_heads
__A : Dict = attention_head_dim
__A : Optional[int] = num_attention_heads * attention_head_dim
__A : Optional[Any] = in_channels
__A : Optional[int] = torch.nn.GroupNorm(num_groups=__lowercase , num_channels=__lowercase , eps=1E-6 , affine=__lowercase )
__A : Optional[Any] = nn.Linear(__lowercase , __lowercase )
# 3. Define transformers blocks
__A : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
__lowercase , __lowercase , __lowercase , dropout=__lowercase , cross_attention_dim=__lowercase , activation_fn=__lowercase , attention_bias=__lowercase , double_self_attention=__lowercase , norm_elementwise_affine=__lowercase , )
for d in range(__lowercase )
] )
__A : Union[str, Any] = nn.Linear(__lowercase , __lowercase )
def snake_case__ ( self , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=1 , __lowercase=None , __lowercase = True , ):
"""simple docstring"""
__A ,__A ,__A ,__A : int = hidden_states.shape
__A : Union[str, Any] = batch_frames // num_frames
__A : Union[str, Any] = hidden_states
__A : List[str] = hidden_states[None, :].reshape(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
__A : List[Any] = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
__A : Dict = self.norm(__lowercase )
__A : List[Any] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , __lowercase , __lowercase )
__A : Optional[Any] = self.proj_in(__lowercase )
# 2. Blocks
for block in self.transformer_blocks:
__A : List[Any] = block(
__lowercase , encoder_hidden_states=__lowercase , timestep=__lowercase , cross_attention_kwargs=__lowercase , class_labels=__lowercase , )
# 3. Output
__A : Optional[int] = self.proj_out(__lowercase )
__A : int = (
hidden_states[None, None, :]
.reshape(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
__A : List[str] = hidden_states.reshape(__lowercase , __lowercase , __lowercase , __lowercase )
__A : str = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=__lowercase )
| 365 | 1 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
a= logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a= []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def _UpperCamelCase ( _a : Optional[Any] , _a : Optional[int] , _a : List[Any] ):
"""simple docstring"""
__UpperCamelCase : Tuple = state_dict.pop(_a )
__UpperCamelCase : Dict = val
def _UpperCamelCase ( _a : Dict ):
"""simple docstring"""
__UpperCamelCase : str = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__UpperCamelCase : List[Any] = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
__UpperCamelCase : Any = value
else:
__UpperCamelCase : List[Any] = value
return new_state_dict
def _UpperCamelCase ( _a : Tuple ):
"""simple docstring"""
__UpperCamelCase : int = ''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__UpperCamelCase : List[str] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
__UpperCamelCase : Any = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__UpperCamelCase : List[Any] = in_proj_weight[:2_5_6, :]
__UpperCamelCase : List[str] = in_proj_bias[:2_5_6]
__UpperCamelCase : Union[str, Any] = in_proj_weight[2_5_6:5_1_2, :]
__UpperCamelCase : str = in_proj_bias[2_5_6:5_1_2]
__UpperCamelCase : Dict = in_proj_weight[-2_5_6:, :]
__UpperCamelCase : Optional[Any] = in_proj_bias[-2_5_6:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__UpperCamelCase : List[str] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
__UpperCamelCase : str = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__UpperCamelCase : Tuple = in_proj_weight[:2_5_6, :]
__UpperCamelCase : int = in_proj_bias[:2_5_6]
__UpperCamelCase : List[Any] = in_proj_weight[2_5_6:5_1_2, :]
__UpperCamelCase : Union[str, Any] = in_proj_bias[2_5_6:5_1_2]
__UpperCamelCase : Optional[Any] = in_proj_weight[-2_5_6:, :]
__UpperCamelCase : List[str] = in_proj_bias[-2_5_6:]
# read in weights + bias of input projection layer of cross-attention
__UpperCamelCase : List[str] = state_dict.pop(
f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
__UpperCamelCase : Tuple = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
__UpperCamelCase : List[Any] = in_proj_weight_cross_attn[:2_5_6, :]
__UpperCamelCase : Optional[int] = in_proj_bias_cross_attn[:2_5_6]
__UpperCamelCase : List[Any] = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
__UpperCamelCase : Tuple = in_proj_bias_cross_attn[2_5_6:5_1_2]
__UpperCamelCase : Optional[int] = in_proj_weight_cross_attn[-2_5_6:, :]
__UpperCamelCase : Tuple = in_proj_bias_cross_attn[-2_5_6:]
def _UpperCamelCase ( _a : List[Any] , _a : str ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : str = image.size
__UpperCamelCase : int = max(_a , _a )
__UpperCamelCase : List[str] = 8_0_0 if 'detection' in checkpoint_url else 1_0_0_0
__UpperCamelCase : Union[str, Any] = target_max_size / current_max_size
__UpperCamelCase : Tuple = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def _UpperCamelCase ( _a : List[str] ):
"""simple docstring"""
__UpperCamelCase : int = F.to_tensor(_a )
__UpperCamelCase : str = F.normalize(_a , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def _UpperCamelCase ( _a : Union[str, Any] , _a : Union[str, Any] , _a : List[Any] ):
"""simple docstring"""
logger.info('Converting model...' )
# load original state dict
__UpperCamelCase : Any = torch.hub.load_state_dict_from_url(_a , map_location='cpu' )
# rename keys
for src, dest in rename_keys:
rename_key(_a , _a , _a )
__UpperCamelCase : str = rename_backbone_keys(_a )
# query, key and value matrices need special treatment
read_in_q_k_v(_a )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__UpperCamelCase : Dict = 'model.'
for key in state_dict.copy().keys():
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
__UpperCamelCase : str = state_dict.pop(_a )
__UpperCamelCase : Tuple = val
# create HuggingFace model and load state dict
__UpperCamelCase : List[Any] = TableTransformerConfig(
backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
__UpperCamelCase : Dict = 1_5
__UpperCamelCase : int = 2
__UpperCamelCase : str = {0: 'table', 1: 'table rotated'}
__UpperCamelCase : str = idalabel
__UpperCamelCase : int = {v: k for k, v in idalabel.items()}
else:
__UpperCamelCase : Any = 1_2_5
__UpperCamelCase : Tuple = 6
__UpperCamelCase : List[Any] = {
0: 'table',
1: 'table column',
2: 'table row',
3: 'table column header',
4: 'table projected row header',
5: 'table spanning cell',
}
__UpperCamelCase : Optional[Any] = idalabel
__UpperCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
__UpperCamelCase : Any = DetrImageProcessor(
format='coco_detection' , max_size=8_0_0 if 'detection' in checkpoint_url else 1_0_0_0 )
__UpperCamelCase : Any = TableTransformerForObjectDetection(_a )
model.load_state_dict(_a )
model.eval()
# verify our conversion
__UpperCamelCase : Any = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
__UpperCamelCase : Dict = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=_a )
__UpperCamelCase : Any = Image.open(_a ).convert('RGB' )
__UpperCamelCase : Tuple = normalize(resize(_a , _a ) ).unsqueeze(0 )
__UpperCamelCase : List[Any] = model(_a )
if "detection" in checkpoint_url:
__UpperCamelCase : List[Any] = (1, 1_5, 3)
__UpperCamelCase : int = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
__UpperCamelCase : List[Any] = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
__UpperCamelCase : int = (1, 1_2_5, 7)
__UpperCamelCase : List[Any] = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
__UpperCamelCase : str = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , _a , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , _a , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_a ).mkdir(exist_ok=_a )
model.save_pretrained(_a )
image_processor.save_pretrained(_a )
if push_to_hub:
# Push model to HF hub
logger.info('Pushing model to the hub...' )
__UpperCamelCase : Union[str, Any] = (
'microsoft/table-transformer-detection'
if 'detection' in checkpoint_url
else 'microsoft/table-transformer-structure-recognition'
)
model.push_to_hub(_a )
image_processor.push_to_hub(_a )
if __name__ == "__main__":
a= argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
a= parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 287 | '''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
a= {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def _UpperCamelCase ( _a : int , _a : Union[str, Any] , _a : str , _a : List[str] , _a : Dict=False , _a : str=True ):
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(f"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""" )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Optional[int] = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__UpperCamelCase : List[Any] = cached_file(_a , _a , force_download=not use_cached_models )
__UpperCamelCase : Union[str, Any] = config_class.from_json_file(_a )
__UpperCamelCase : Union[str, Any] = True
__UpperCamelCase : List[str] = True
print(f"""Building TensorFlow model from configuration: {config}""" )
__UpperCamelCase : Optional[int] = model_class(_a )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
__UpperCamelCase : Union[str, Any] = cached_file(
_a , _a , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
__UpperCamelCase : Any = load_pytorch_checkpoint_in_tfa_model(_a , _a )
if compare_with_pt_model:
__UpperCamelCase : Dict = tf_model(tf_model.dummy_inputs , training=_a ) # build the network
__UpperCamelCase : List[str] = torch.load(_a , map_location='cpu' )
__UpperCamelCase : Union[str, Any] = pt_model_class.from_pretrained(
pretrained_model_name_or_path=_a , config=_a , state_dict=_a )
with torch.no_grad():
__UpperCamelCase : Any = pt_model(**pt_model.dummy_inputs )
__UpperCamelCase : Optional[Any] = pto[0].numpy()
__UpperCamelCase : int = tfo[0].numpy()
__UpperCamelCase : Union[str, Any] = np.amax(np.abs(np_pt - np_tf ) )
print(f"""Max absolute difference between models outputs {diff}""" )
assert diff <= 2E-2, f"""Error, model absolute difference is >2e-2: {diff}"""
# Save pytorch-model
print(f"""Save TensorFlow model to {tf_dump_path}""" )
tf_model.save_weights(_a , save_format='h5' )
def _UpperCamelCase ( _a : str , _a : Optional[Any] , _a : Dict=None , _a : Tuple=None , _a : int=False , _a : Union[str, Any]=False , _a : int=False , _a : List[str]=False , ):
"""simple docstring"""
if args_model_type is None:
__UpperCamelCase : Dict = list(MODEL_CLASSES.keys() )
else:
__UpperCamelCase : Union[str, Any] = [args_model_type]
for j, model_type in enumerate(_a , start=1 ):
print('=' * 1_0_0 )
print(f""" Converting model type {j}/{len(_a )}: {model_type}""" )
print('=' * 1_0_0 )
if model_type not in MODEL_CLASSES:
raise ValueError(f"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""" )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : str = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__UpperCamelCase : List[str] = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__UpperCamelCase : Optional[int] = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(_a , _a ) , start=1 ):
print('-' * 1_0_0 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f""" Skipping finetuned checkpoint {model_shortcut_name}""" )
continue
__UpperCamelCase : Dict = model_shortcut_name
elif only_convert_finetuned_models:
print(f""" Skipping not finetuned checkpoint {model_shortcut_name}""" )
continue
print(
f""" Converting checkpoint {i}/{len(_a )}: {model_shortcut_name} - model_type {model_type}""" )
print('-' * 1_0_0 )
if config_shortcut_name in aws_config_map:
__UpperCamelCase : Tuple = cached_file(_a , _a , force_download=not use_cached_models )
else:
__UpperCamelCase : Dict = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__UpperCamelCase : Dict = cached_file(_a , _a , force_download=not use_cached_models )
else:
__UpperCamelCase : Tuple = model_shortcut_name
if os.path.isfile(_a ):
__UpperCamelCase : int = 'converted_model'
convert_pt_checkpoint_to_tf(
model_type=_a , pytorch_checkpoint_path=_a , config_file=_a , tf_dump_path=os.path.join(_a , model_shortcut_name + '-tf_model.h5' ) , compare_with_pt_model=_a , )
if remove_cached_files:
os.remove(_a )
os.remove(_a )
if __name__ == "__main__":
a= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
'''use the configuration associated to the shortcut name on the AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
a= parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 287 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ : str = {
"configuration_layoutlmv3": [
"LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LayoutLMv3Config",
"LayoutLMv3OnnxConfig",
],
"processing_layoutlmv3": ["LayoutLMv3Processor"],
"tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Dict = [
"LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv3ForQuestionAnswering",
"LayoutLMv3ForSequenceClassification",
"LayoutLMv3ForTokenClassification",
"LayoutLMv3Model",
"LayoutLMv3PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Any = [
"TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLayoutLMv3ForQuestionAnswering",
"TFLayoutLMv3ForSequenceClassification",
"TFLayoutLMv3ForTokenClassification",
"TFLayoutLMv3Model",
"TFLayoutLMv3PreTrainedModel",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = ["LayoutLMv3FeatureExtractor"]
SCREAMING_SNAKE_CASE__ : List[str] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
SCREAMING_SNAKE_CASE__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | A = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
A = [{'type': 'code', 'content': INSTALL_CONTENT}]
A = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
} | 544 | 0 |
import re
from filelock import FileLock
try:
import nltk
SCREAMING_SNAKE_CASE__:Any = True
except (ImportError, ModuleNotFoundError):
SCREAMING_SNAKE_CASE__:Optional[Any] = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def _lowerCamelCase( a ):
re.sub("<n>" , "" , UpperCAmelCase__ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCAmelCase__ ) )
| 708 | """simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class snake_case__ ( snake_case_ ):
def a__ ( self , lowerCamelCase ):
with open(lowerCamelCase , encoding="utf-8" ) as input_file:
__a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
__a = input_file.read()
__a = regexp.search(lowerCamelCase )
return match
def a__ ( self , lowerCamelCase ):
with open(lowerCamelCase , encoding="utf-8" ) as input_file:
__a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
__a = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__a = regexp.finditer(lowerCamelCase )
__a = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def a__ ( self ):
__a = Path("./datasets" )
__a = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(lowerCamelCase ) ):
raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" )
def a__ ( self ):
__a = Path("./datasets" )
__a = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(lowerCamelCase ) ):
raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 67 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class _UpperCamelCase ( __snake_case):
def A (self , lowerCamelCase__ ):
"""simple docstring"""
return 0.0
def _SCREAMING_SNAKE_CASE ( UpperCamelCase : np.ndarray , UpperCamelCase : int ):
A__ = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
A__ = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def _SCREAMING_SNAKE_CASE ( UpperCamelCase : FilterType , UpperCamelCase : int ):
A__ = 512
A__ = [1] + [0] * (size - 1)
A__ = [filter_type.process(UpperCamelCase ) for item in inputs]
A__ = [0] * (samplerate - size) # zero-padding
outputs += filler
A__ = np.abs(np.fft.fft(UpperCamelCase ) )
A__ = 20 * np.logaa(UpperCamelCase )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
# Display within reasonable bounds
A__ = get_bounds(UpperCamelCase , UpperCamelCase )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("""Gain (dB)""" )
plt.plot(UpperCamelCase )
plt.show()
def _SCREAMING_SNAKE_CASE ( UpperCamelCase : FilterType , UpperCamelCase : int ):
A__ = 512
A__ = [1] + [0] * (size - 1)
A__ = [filter_type.process(UpperCamelCase ) for item in inputs]
A__ = [0] * (samplerate - size) # zero-padding
outputs += filler
A__ = np.angle(np.fft.fft(UpperCamelCase ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("""Phase shift (Radians)""" )
plt.plot(np.unwrap(UpperCamelCase , -2 * pi ) )
plt.show()
| 574 |
"""simple docstring"""
from __future__ import annotations
import time
lowerCamelCase__ = list[tuple[int, int]]
lowerCamelCase__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCamelCase__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class _UpperCamelCase :
def __init__(self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
A__ = pos_x
A__ = pos_y
A__ = (pos_y, pos_x)
A__ = goal_x
A__ = goal_y
A__ = parent
class _UpperCamelCase :
def __init__(self , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
A__ = Node(start[1] , start[0] , goal[1] , goal[0] , lowerCamelCase__ )
A__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowerCamelCase__ )
A__ = [self.start]
A__ = False
def A (self ):
"""simple docstring"""
while self.node_queue:
A__ = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
A__ = True
return self.retrace_path(lowerCamelCase__ )
A__ = self.get_successors(lowerCamelCase__ )
for node in successors:
self.node_queue.append(lowerCamelCase__ )
if not self.reached:
return [self.start.pos]
return None
def A (self , lowerCamelCase__ ):
"""simple docstring"""
A__ = []
for action in delta:
A__ = parent.pos_x + action[1]
A__ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(lowerCamelCase__ , lowerCamelCase__ , self.target.pos_y , self.target.pos_x , lowerCamelCase__ ) )
return successors
def A (self , lowerCamelCase__ ):
"""simple docstring"""
A__ = node
A__ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
A__ = current_node.parent
path.reverse()
return path
class _UpperCamelCase :
def __init__(self , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
A__ = BreadthFirstSearch(lowerCamelCase__ , lowerCamelCase__ )
A__ = BreadthFirstSearch(lowerCamelCase__ , lowerCamelCase__ )
A__ = False
def A (self ):
"""simple docstring"""
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
A__ = self.fwd_bfs.node_queue.pop(0 )
A__ = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
A__ = True
return self.retrace_bidirectional_path(
lowerCamelCase__ , lowerCamelCase__ )
A__ = current_bwd_node
A__ = current_fwd_node
A__ = {
self.fwd_bfs: self.fwd_bfs.get_successors(lowerCamelCase__ ),
self.bwd_bfs: self.bwd_bfs.get_successors(lowerCamelCase__ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(lowerCamelCase__ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def A (self , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
A__ = self.fwd_bfs.retrace_path(lowerCamelCase__ )
A__ = self.bwd_bfs.retrace_path(lowerCamelCase__ )
bwd_path.pop()
bwd_path.reverse()
A__ = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
lowerCamelCase__ = (0, 0)
lowerCamelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowerCamelCase__ = time.time()
lowerCamelCase__ = BreadthFirstSearch(init, goal)
lowerCamelCase__ = bfs.search()
lowerCamelCase__ = time.time() - start_bfs_time
print("Unidirectional BFS computation time : ", bfs_time)
lowerCamelCase__ = time.time()
lowerCamelCase__ = BidirectionalBreadthFirstSearch(init, goal)
lowerCamelCase__ = bd_bfs.search()
lowerCamelCase__ = time.time() - start_bd_bfs_time
print("Bidirectional BFS computation time : ", bd_bfs_time)
| 574 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class A__ ( lowercase_ , unittest.TestCase):
A_ : Optional[int] = MvpTokenizer
A_ : Optional[int] = MvpTokenizerFast
A_ : Optional[int] = True
A_ : Tuple = filter_roberta_detectors
def __lowerCamelCase ( self ):
super().setUp()
__lowerCAmelCase : Union[str, Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__lowerCAmelCase : Optional[Any] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
__lowerCAmelCase : Tuple = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__lowerCAmelCase : int = {'unk_token': '<unk>'}
__lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowerCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_SCREAMING_SNAKE_CASE ) )
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
return "lower newer", "lower newer"
@cached_property
def __lowerCamelCase ( self ):
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
def __lowerCamelCase ( self ):
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowerCAmelCase : Union[str, Any] = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase : Tuple = tokenizer(_SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__lowerCAmelCase : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Test that special tokens are reset
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase : Dict = tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids' , _SCREAMING_SNAKE_CASE )
self.assertIn('attention_mask' , _SCREAMING_SNAKE_CASE )
self.assertNotIn('labels' , _SCREAMING_SNAKE_CASE )
self.assertNotIn('decoder_attention_mask' , _SCREAMING_SNAKE_CASE )
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase : Optional[Any] = tokenizer(text_target=_SCREAMING_SNAKE_CASE , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def __lowerCamelCase ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase : Optional[Any] = tokenizer(
['I am a small frog' * 10_24, 'I am a small frog'] , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(batch.input_ids.shape , (2, 10_24) )
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = ['A long paragraph for summarization.']
__lowerCAmelCase : str = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase : Dict = tokenizer(_SCREAMING_SNAKE_CASE , text_target=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
__lowerCAmelCase : Optional[Any] = inputs['input_ids']
__lowerCAmelCase : Union[str, Any] = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowerCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = 'A, <mask> AllenNLP sentence.'
__lowerCAmelCase : Optional[int] = tokenizer_r.encode_plus(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = tokenizer_p.encode_plus(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
__lowerCAmelCase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
__lowerCAmelCase : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
_SCREAMING_SNAKE_CASE , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_SCREAMING_SNAKE_CASE , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) | 720 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[str] = len(_UpperCamelCase )
__lowerCAmelCase : Tuple = [[0] * n for i in range(_UpperCamelCase )]
for i in range(_UpperCamelCase ):
__lowerCAmelCase : Any = y_points[i]
for i in range(2 , _UpperCamelCase ):
for j in range(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : int = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod() | 549 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 132 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
SCREAMING_SNAKE_CASE = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["""GPTSw3Tokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 199 | 0 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
snake_case_ = """"""
snake_case_ = """"""
snake_case_ = """"""
snake_case_ = """"""
def __lowercase (_SCREAMING_SNAKE_CASE :str ):
SCREAMING_SNAKE_CASE : str = tweepy.OAuthHandler(snake_case__ , snake_case__ )
auth.set_access_token(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE : Optional[int] = tweepy.API(snake_case__ )
# initialize a list to hold all the tweepy Tweets
SCREAMING_SNAKE_CASE : Union[str, Any] = []
# make initial request for most recent tweets (200 is the maximum allowed count)
SCREAMING_SNAKE_CASE : List[str] = api.user_timeline(screen_name=snake_case__ , count=2_00 )
# save most recent tweets
alltweets.extend(snake_case__ )
# save the id of the oldest tweet less one
SCREAMING_SNAKE_CASE : Optional[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(snake_case__ ) > 0:
print(F'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
SCREAMING_SNAKE_CASE : List[Any] = api.user_timeline(
screen_name=snake_case__ , count=2_00 , max_id=snake_case__ )
# save most recent tweets
alltweets.extend(snake_case__ )
# update the id of the oldest tweet less one
SCREAMING_SNAKE_CASE : Optional[int] = alltweets[-1].id - 1
print(F'''...{len(snake_case__ )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
SCREAMING_SNAKE_CASE : Optional[int] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
SCREAMING_SNAKE_CASE : List[str] = csv.writer(snake_case__ )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(snake_case__ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 706 |
'''simple docstring'''
from __future__ import annotations
def __lowercase (_SCREAMING_SNAKE_CASE :int | str ):
SCREAMING_SNAKE_CASE : int = str(_SCREAMING_SNAKE_CASE )
return n == n[::-1]
def __lowercase (_SCREAMING_SNAKE_CASE :int = 1_00_00_00 ):
SCREAMING_SNAKE_CASE : int = 0
for i in range(1 , _SCREAMING_SNAKE_CASE ):
if is_palindrome(_SCREAMING_SNAKE_CASE ) and is_palindrome(bin(_SCREAMING_SNAKE_CASE ).split('''b''' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 355 | 0 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 136 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=_SCREAMING_SNAKE_CASE ):
snake_case = ["speech"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(self , ["""speech"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=_SCREAMING_SNAKE_CASE ):
snake_case = ["speech"]
def __init__( self : int , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Dict ):
requires_backends(self , ["""speech"""] )
| 129 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
_UpperCamelCase : int = logging.get_logger(__name__)
@add_end_docstrings(a_ )
class _snake_case ( a_ ):
def __init__( self , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , 'vision' )
self.check_model_type(_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
'''simple docstring'''
if "text_queries" in kwargs:
lowerCAmelCase = kwargs.pop('text_queries' )
if isinstance(_SCREAMING_SNAKE_CASE , (str, Image.Image) ):
lowerCAmelCase = {'image': image, 'candidate_labels': candidate_labels}
else:
lowerCAmelCase = image
lowerCAmelCase = super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
return results
def _SCREAMING_SNAKE_CASE ( self , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = {}
if "threshold" in kwargs:
lowerCAmelCase = kwargs['threshold']
if "top_k" in kwargs:
lowerCAmelCase = kwargs['top_k']
return {}, {}, postprocess_params
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = load_image(inputs['image'] )
lowerCAmelCase = inputs['candidate_labels']
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase = candidate_labels.split(',' )
lowerCAmelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
lowerCAmelCase = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
yield {
"is_last": i == len(_SCREAMING_SNAKE_CASE ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = model_inputs.pop('target_size' )
lowerCAmelCase = model_inputs.pop('candidate_label' )
lowerCAmelCase = model_inputs.pop('is_last' )
lowerCAmelCase = self.model(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
lowerCAmelCase = []
for model_output in model_outputs:
lowerCAmelCase = model_output['candidate_label']
lowerCAmelCase = BaseModelOutput(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.image_processor.post_process_object_detection(
outputs=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE , target_sizes=model_output['target_size'] )[0]
for index in outputs["scores"].nonzero():
lowerCAmelCase = outputs['scores'][index].item()
lowerCAmelCase = self._get_bounding_box(outputs['boxes'][index][0] )
lowerCAmelCase = {'score': score, 'label': label, 'box': box}
results.append(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x["score"] , reverse=_SCREAMING_SNAKE_CASE )
if top_k:
lowerCAmelCase = results[:top_k]
return results
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = box.int().tolist()
lowerCAmelCase = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
| 514 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_UpperCamelCase : str = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {"vocab_file": "spiece.model"}
_UpperCamelCase : Union[str, Any] = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
_UpperCamelCase : List[Any] = {
"AI-Sweden/gpt-sw3-126m": 2048,
"AI-Sweden/gpt-sw3-350m": 2048,
"AI-Sweden/gpt-sw3-1.6b": 2048,
"AI-Sweden/gpt-sw3-6.7b": 2048,
"AI-Sweden/gpt-sw3-20b": 2048,
}
class _snake_case ( a_ ):
SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
'''simple docstring'''
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase = kwargs.get('name_or_path' )
if name_or_path is None:
logger.warning(
'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
' you are testing the model, this can safely be ignored' )
lowerCAmelCase = 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowerCAmelCase = '<|endoftext|>' if eos_token is None else eos_token
lowerCAmelCase = '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowerCAmelCase = unk_token if pad_token is None else pad_token
lowerCAmelCase = eos_token if bos_token is None else bos_token
else:
lowerCAmelCase = '<pad>' if pad_token is None else pad_token
lowerCAmelCase = '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=_SCREAMING_SNAKE_CASE , remove_space=_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = remove_space
lowerCAmelCase = keep_accents
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_SCREAMING_SNAKE_CASE )
# Used for whitespace normalization in input texts
# fmt : off
lowerCAmelCase = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowerCAmelCase = re.compile(
F'[{"".join(map(_SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(1_27 , 1_60 ) ) + [1_60, 1_73, 82_03] ) )}]' )
def __getstate__( self ):
'''simple docstring'''
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return len(self.sp_model )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = self.non_printing_characters_re.sub('' , _SCREAMING_SNAKE_CASE )
# Normalize whitespaces
lowerCAmelCase = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
# NFC Unicode normalization
lowerCAmelCase = unicodedata.normalize('NFC' , _SCREAMING_SNAKE_CASE )
return text
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = self.preprocess_text(_SCREAMING_SNAKE_CASE )
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.sp_model.IdToPiece(_SCREAMING_SNAKE_CASE )
@staticmethod
def _SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return out_string
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = []
lowerCAmelCase = ''
lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token
lowerCAmelCase = True
lowerCAmelCase = []
else:
current_sub_tokens.append(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = False
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE )
return out_string
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase = self.preprocess_text(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.sp_model.encode(_SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase = [self.preprocess_text(_SCREAMING_SNAKE_CASE ) for t in text]
lowerCAmelCase = self.sp_model.encode(_SCREAMING_SNAKE_CASE )
if return_tensors is True or return_tensors == "pt":
lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE )
return token_ids
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.sp_model.decode(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
lowerCAmelCase = (
F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(_SCREAMING_SNAKE_CASE ) + F'{self.bos_token}Bot:'
)
return self.encode(text=_SCREAMING_SNAKE_CASE )
| 514 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :List[str] = logging.get_logger(__name__)
lowerCamelCase :List[str] = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCAmelCase ( __snake_case ):
a: str = "wav2vec2"
def __init__( self: Dict , __UpperCamelCase: Optional[int]=32 , __UpperCamelCase: Optional[int]=768 , __UpperCamelCase: Tuple=12 , __UpperCamelCase: List[Any]=12 , __UpperCamelCase: Tuple=3072 , __UpperCamelCase: Any="gelu" , __UpperCamelCase: Union[str, Any]=0.1 , __UpperCamelCase: Union[str, Any]=0.1 , __UpperCamelCase: int=0.1 , __UpperCamelCase: Tuple=0.0 , __UpperCamelCase: Any=0.0 , __UpperCamelCase: str=0.1 , __UpperCamelCase: str=0.1 , __UpperCamelCase: int=0.0_2 , __UpperCamelCase: Dict=1E-5 , __UpperCamelCase: Union[str, Any]="group" , __UpperCamelCase: List[str]="gelu" , __UpperCamelCase: List[str]=(512, 512, 512, 512, 512, 512, 512) , __UpperCamelCase: List[str]=(5, 2, 2, 2, 2, 2, 2) , __UpperCamelCase: List[Any]=(10, 3, 3, 3, 3, 2, 2) , __UpperCamelCase: Optional[int]=False , __UpperCamelCase: int=128 , __UpperCamelCase: Dict=16 , __UpperCamelCase: List[str]=False , __UpperCamelCase: Optional[Any]=True , __UpperCamelCase: Optional[Any]=0.0_5 , __UpperCamelCase: Union[str, Any]=10 , __UpperCamelCase: Optional[int]=2 , __UpperCamelCase: Union[str, Any]=0.0 , __UpperCamelCase: List[Any]=10 , __UpperCamelCase: Dict=0 , __UpperCamelCase: str=320 , __UpperCamelCase: List[Any]=2 , __UpperCamelCase: str=0.1 , __UpperCamelCase: List[str]=100 , __UpperCamelCase: int=256 , __UpperCamelCase: Any=256 , __UpperCamelCase: Optional[int]=0.1 , __UpperCamelCase: Union[str, Any]="sum" , __UpperCamelCase: Union[str, Any]=False , __UpperCamelCase: Dict=False , __UpperCamelCase: Union[str, Any]=256 , __UpperCamelCase: Optional[int]=(512, 512, 512, 512, 1500) , __UpperCamelCase: str=(5, 3, 3, 1, 1) , __UpperCamelCase: int=(1, 2, 3, 1, 1) , __UpperCamelCase: int=512 , __UpperCamelCase: List[str]=0 , __UpperCamelCase: Union[str, Any]=1 , __UpperCamelCase: Optional[int]=2 , __UpperCamelCase: Union[str, Any]=False , __UpperCamelCase: Any=3 , __UpperCamelCase: Any=2 , __UpperCamelCase: List[str]=3 , __UpperCamelCase: Union[str, Any]=None , __UpperCamelCase: Optional[int]=None , **__UpperCamelCase: Union[str, Any] , ):
super().__init__(**__UpperCamelCase , pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(__UpperCamelCase )
_a = list(__UpperCamelCase )
_a = list(__UpperCamelCase )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = feat_quantizer_dropout
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# adapter
_a = add_adapter
_a = adapter_kernel_size
_a = adapter_stride
_a = num_adapter_layers
_a = output_hidden_size or hidden_size
_a = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_a = list(__UpperCamelCase )
_a = list(__UpperCamelCase )
_a = list(__UpperCamelCase )
_a = xvector_output_dim
@property
def _A ( self: Optional[int] ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 487 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCamelCase :str = logging.get_logger(__name__)
class UpperCAmelCase ( __snake_case ):
a: Union[str, Any] = ["input_values", "attention_mask"]
def __init__( self: Dict , __UpperCamelCase: int = 1 , __UpperCamelCase: int = 1_6000 , __UpperCamelCase: float = 0.0 , __UpperCamelCase: bool = False , __UpperCamelCase: int = 80 , __UpperCamelCase: int = 16 , __UpperCamelCase: int = 64 , __UpperCamelCase: str = "hann_window" , __UpperCamelCase: float = 1.0 , __UpperCamelCase: float = 80 , __UpperCamelCase: float = 7600 , __UpperCamelCase: float = 1E-10 , __UpperCamelCase: int = 2 , __UpperCamelCase: bool = True , **__UpperCamelCase: List[Any] , ):
super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase )
_a = do_normalize
_a = return_attention_mask
_a = num_mel_bins
_a = hop_length
_a = win_length
_a = win_function
_a = frame_signal_scale
_a = fmin
_a = fmax
_a = mel_floor
_a = reduction_factor
_a = win_length * sampling_rate // 1000
_a = hop_length * sampling_rate // 1000
_a = optimal_fft_length(self.sample_size )
_a = (self.n_fft // 2) + 1
_a = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCamelCase )
_a = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
if frame_signal_scale != 1.0:
warnings.warn(
'''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , __UpperCamelCase , )
if reduction_factor != 2.0:
warnings.warn(
'''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , __UpperCamelCase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _A ( __UpperCamelCase: List[np.ndarray] , __UpperCamelCase: List[np.ndarray] , __UpperCamelCase: float = 0.0 ):
if attention_mask is not None:
_a = np.array(__UpperCamelCase , np.intaa )
_a = []
for vector, length in zip(__UpperCamelCase , attention_mask.sum(-1 ) ):
_a = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_a = padding_value
normed_input_values.append(__UpperCamelCase )
else:
_a = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def _A ( self: Optional[Any] , __UpperCamelCase: np.ndarray , ):
_a = spectrogram(
__UpperCamelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , )
return log_mel_spec.T
def __call__( self: Optional[Any] , __UpperCamelCase: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , __UpperCamelCase: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , __UpperCamelCase: Union[bool, str, PaddingStrategy] = False , __UpperCamelCase: Optional[int] = None , __UpperCamelCase: bool = False , __UpperCamelCase: Optional[int] = None , __UpperCamelCase: Optional[bool] = None , __UpperCamelCase: Optional[Union[str, TensorType]] = None , __UpperCamelCase: Optional[int] = None , **__UpperCamelCase: Any , ):
if audio is None and audio_target is None:
raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if audio is not None:
_a = self._process_audio(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase , )
else:
_a = None
if audio_target is not None:
_a = self._process_audio(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase , )
if inputs is None:
return inputs_target
else:
_a = inputs_target['''input_values''']
_a = inputs_target.get('''attention_mask''' )
if decoder_attention_mask is not None:
_a = decoder_attention_mask
return inputs
def _A ( self: Optional[Any] , __UpperCamelCase: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __UpperCamelCase: bool = False , __UpperCamelCase: Union[bool, str, PaddingStrategy] = False , __UpperCamelCase: Optional[int] = None , __UpperCamelCase: bool = False , __UpperCamelCase: Optional[int] = None , __UpperCamelCase: Optional[bool] = None , __UpperCamelCase: Optional[Union[str, TensorType]] = None , **__UpperCamelCase: Optional[int] , ):
_a = isinstance(__UpperCamelCase , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
_a = is_batched_numpy or (
isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_a = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ):
_a = np.asarray(__UpperCamelCase , dtype=np.floataa )
elif isinstance(__UpperCamelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
_a = speech.astype(np.floataa )
# always return batch
if not is_batched:
_a = [speech]
# needed to make pad() work on spectrogram inputs
_a = self.feature_size
# convert into correct format for padding
if is_target:
_a = [self._extract_mel_features(__UpperCamelCase ) for waveform in speech]
_a = BatchFeature({'''input_values''': features} )
_a = self.num_mel_bins
else:
_a = BatchFeature({'''input_values''': speech} )
_a = self.pad(
__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
_a = feature_size_hack
# convert input values to correct format
_a = padded_inputs['''input_values''']
if not isinstance(input_values[0] , np.ndarray ):
_a = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(__UpperCamelCase , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
_a = [array.astype(np.floataa ) for array in input_values]
elif isinstance(__UpperCamelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
_a = input_values.astype(np.floataa )
# convert attention_mask to correct format
_a = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
_a = [np.asarray(__UpperCamelCase , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
_a = (
attention_mask
if self._get_padding_strategies(__UpperCamelCase , max_length=__UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_a = self.zero_mean_unit_var_norm(
padded_inputs['''input_values'''] , attention_mask=__UpperCamelCase , padding_value=self.padding_value )
if return_tensors is not None:
_a = padded_inputs.convert_to_tensors(__UpperCamelCase )
return padded_inputs
def _A ( self: Optional[int] ):
_a = super().to_dict()
# Don't serialize these as they are derived from the other properties.
_a = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs''']
for name in names:
if name in output:
del output[name]
return output
| 487 | 1 |
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __a (lowerCamelCase ):
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__magic_name__ , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(__magic_name__ , '''num_heads''' ) )
class __a :
def __init__( self : List[str] , __magic_name__ : Dict , __magic_name__ : Tuple=13 , __magic_name__ : Any=64 , __magic_name__ : Optional[Any]=3 , __magic_name__ : Tuple=[16, 48, 96] , __magic_name__ : Optional[Any]=[1, 3, 6] , __magic_name__ : str=[1, 2, 10] , __magic_name__ : str=[7, 3, 3] , __magic_name__ : Dict=[4, 2, 2] , __magic_name__ : Any=[2, 1, 1] , __magic_name__ : str=[2, 2, 2] , __magic_name__ : Union[str, Any]=[False, False, True] , __magic_name__ : Dict=[0.0, 0.0, 0.0] , __magic_name__ : Optional[int]=0.0_2 , __magic_name__ : Dict=1E-12 , __magic_name__ : Optional[Any]=True , __magic_name__ : List[str]=True , __magic_name__ : int=2 , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : Dict = image_size
UpperCAmelCase_ : Tuple = patch_sizes
UpperCAmelCase_ : List[Any] = patch_stride
UpperCAmelCase_ : Dict = patch_padding
UpperCAmelCase_ : Union[str, Any] = is_training
UpperCAmelCase_ : Optional[Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = num_labels
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : Union[str, Any] = embed_dim
UpperCAmelCase_ : str = num_heads
UpperCAmelCase_ : int = stride_kv
UpperCAmelCase_ : Any = depth
UpperCAmelCase_ : Tuple = cls_token
UpperCAmelCase_ : str = attention_drop_rate
UpperCAmelCase_ : Any = initializer_range
UpperCAmelCase_ : Optional[Any] = layer_norm_eps
def UpperCAmelCase__ ( self : int ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Any = None
if self.use_labels:
UpperCAmelCase_ : int = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : int = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : str , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : Dict ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : int = CvtModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Any = model(__magic_name__ )
UpperCAmelCase_ : Optional[int] = (self.image_size, self.image_size)
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
UpperCAmelCase_ : Tuple = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
UpperCAmelCase_ : List[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[int] , __magic_name__ : Any , __magic_name__ : int ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Any = self.num_labels
UpperCAmelCase_ : List[str] = CvtForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : str = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = config_and_inputs
UpperCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __a (lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : Union[str, Any] = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
__a : Union[str, Any] = (
{"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
if is_torch_available()
else {}
)
__a : Dict = False
__a : List[Any] = False
__a : Optional[Any] = False
__a : List[str] = False
__a : str = False
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
UpperCAmelCase_ : int = CvtModelTester(self )
UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason='''Cvt does not output attentions''' )
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Any ) -> int:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[int] = model_class(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : str = [*signature.parameters.keys()]
UpperCAmelCase_ : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : int ):
UpperCAmelCase_ : List[Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Any = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
UpperCAmelCase_ : Optional[Any] = outputs.hidden_states
UpperCAmelCase_ : List[str] = len(self.model_tester.depth )
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Any = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
pass
@slow
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = CvtModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def lowerCamelCase_ ( ) -> str:
UpperCAmelCase_ : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __a (unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__magic_name__ )
UpperCAmelCase_ : List[Any] = self.default_image_processor
UpperCAmelCase_ : Any = prepare_img()
UpperCAmelCase_ : Any = image_processor(images=__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : str = model(**__magic_name__ )
# verify the logits
UpperCAmelCase_ : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
UpperCAmelCase_ : Union[str, Any] = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
| 644 |
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int = 10, SCREAMING_SNAKE_CASE__ : int = 22 ) -> int:
UpperCAmelCase_ : Optional[int] = range(1, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[Any] = range(1, SCREAMING_SNAKE_CASE__ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
| 644 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class a__ ( A__ , A__ ):
UpperCAmelCase__ = '''swin'''
UpperCAmelCase__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self :int , _lowerCamelCase :str=224 , _lowerCamelCase :List[Any]=4 , _lowerCamelCase :Optional[Any]=3 , _lowerCamelCase :str=96 , _lowerCamelCase :List[Any]=[2, 2, 6, 2] , _lowerCamelCase :Dict=[3, 6, 12, 24] , _lowerCamelCase :Union[str, Any]=7 , _lowerCamelCase :Union[str, Any]=4.0 , _lowerCamelCase :Optional[int]=True , _lowerCamelCase :int=0.0 , _lowerCamelCase :Any=0.0 , _lowerCamelCase :Union[str, Any]=0.1 , _lowerCamelCase :Optional[Any]="gelu" , _lowerCamelCase :Optional[int]=False , _lowerCamelCase :Any=0.02 , _lowerCamelCase :List[Any]=1E-5 , _lowerCamelCase :Optional[int]=32 , _lowerCamelCase :Any=None , _lowerCamelCase :Optional[Any]=None , **_lowerCamelCase :Optional[int] , ):
'''simple docstring'''
super().__init__(**__a )
UpperCamelCase_ : Any =image_size
UpperCamelCase_ : Any =patch_size
UpperCamelCase_ : Optional[Any] =num_channels
UpperCamelCase_ : Tuple =embed_dim
UpperCamelCase_ : str =depths
UpperCamelCase_ : int =len(__a )
UpperCamelCase_ : Dict =num_heads
UpperCamelCase_ : str =window_size
UpperCamelCase_ : str =mlp_ratio
UpperCamelCase_ : Dict =qkv_bias
UpperCamelCase_ : List[Any] =hidden_dropout_prob
UpperCamelCase_ : int =attention_probs_dropout_prob
UpperCamelCase_ : List[Any] =drop_path_rate
UpperCamelCase_ : List[str] =hidden_act
UpperCamelCase_ : int =use_absolute_embeddings
UpperCamelCase_ : Union[str, Any] =layer_norm_eps
UpperCamelCase_ : Tuple =initializer_range
UpperCamelCase_ : Any =encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase_ : int =int(embed_dim * 2 ** (len(__a ) - 1) )
UpperCamelCase_ : Optional[int] =["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(__a ) + 1 )]
UpperCamelCase_ : str =get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names )
class a__ ( A__ ):
UpperCAmelCase__ = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self :int ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self :Any ):
'''simple docstring'''
return 1E-4
| 357 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __lowerCAmelCase ( ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Dict = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=_UpperCamelCase )
lowerCamelCase__: List[str] = parser.add_subparsers(help="""accelerate command helpers""" )
# Register commands
get_config_parser(subparsers=_UpperCamelCase )
env_command_parser(subparsers=_UpperCamelCase )
launch_command_parser(subparsers=_UpperCamelCase )
tpu_command_parser(subparsers=_UpperCamelCase )
test_command_parser(subparsers=_UpperCamelCase )
# Let's go
lowerCamelCase__: int = parser.parse_args()
if not hasattr(_UpperCamelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
args.func(_UpperCamelCase )
if __name__ == "__main__":
main()
| 306 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : str = {
"configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Dict = [
"PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
"PegasusXForConditionalGeneration",
"PegasusXModel",
"PegasusXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 238 | 0 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def A_ ( _lowerCAmelCase : dict , _lowerCAmelCase : str , _lowerCAmelCase : set , _lowerCAmelCase : set , _lowerCAmelCase : dict , _lowerCAmelCase : dict , _lowerCAmelCase : PriorityQueue , _lowerCAmelCase : dict , _lowerCAmelCase : float | int , ):
"""simple docstring"""
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
_lowerCamelCase : str = cst_fwd.get(_lowerCAmelCase , np.inf )
_lowerCamelCase : Any = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
_lowerCamelCase : Dict = new_cost_f
_lowerCamelCase : str = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
_lowerCamelCase : List[Any] = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : dict , _lowerCAmelCase : dict ):
"""simple docstring"""
_lowerCamelCase : Dict = -1
_lowerCamelCase : List[Any] = set()
_lowerCamelCase : Optional[int] = set()
_lowerCamelCase : List[Any] = {source: 0}
_lowerCamelCase : Dict = {destination: 0}
_lowerCamelCase : List[str] = {source: None}
_lowerCamelCase : Optional[int] = {destination: None}
_lowerCamelCase : PriorityQueue[Any] = PriorityQueue()
_lowerCamelCase : PriorityQueue[Any] = PriorityQueue()
_lowerCamelCase : List[Any] = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
_lowerCamelCase , _lowerCamelCase : List[str] = queue_forward.get()
visited_forward.add(_lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : List[Any] = queue_backward.get()
visited_backward.add(_lowerCAmelCase )
_lowerCamelCase : List[Any] = pass_and_relaxation(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , )
_lowerCamelCase : List[str] = pass_and_relaxation(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
_lowerCamelCase : Any = shortest_distance
return shortest_path_distance
UpperCAmelCase_ : Optional[Any] = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
UpperCAmelCase_ : Dict = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod() | 44 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : List[str] = {
'allenai/led-base-16384': 1_6384,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any],__A : List[Any]=None,__A : str=None,__A : str=None,__A : Optional[int]="replace",__A : Union[str, Any]="<s>",__A : Union[str, Any]="</s>",__A : Any="</s>",__A : Optional[int]="<s>",__A : List[str]="<unk>",__A : str="<pad>",__A : Tuple="<mask>",__A : Union[str, Any]=False,__A : Optional[int]=True,**__A : Optional[int],):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : str = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Tuple = pre_tok_class(**__A )
_lowerCamelCase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : List[str] = "post_processor"
_lowerCamelCase : int = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : str = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : List[str] = tuple(state["cls"] )
_lowerCamelCase : Dict = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : List[Any] = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : List[str] = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Any = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCamelCase_ ( self : str ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : List[str],__A : str ):
_lowerCamelCase : Optional[Any] = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : str = value
def lowerCamelCase_ ( self : List[str],*__A : List[Any],**__A : int ):
_lowerCamelCase : List[str] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : Union[str, Any] ):
_lowerCamelCase : List[Any] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Dict,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : List[str] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : List[str],__A : Optional[Any],__A : List[str]=None ):
_lowerCamelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Tuple = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Any,__A : Union[Dict[str, EncodedInput], BatchEncoding],__A : Optional[int] = None,__A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD,__A : Optional[int] = None,__A : Optional[bool] = None,):
_lowerCamelCase : List[str] = super()._pad(
encoded_inputs=__A,max_length=__A,padding_strategy=__A,pad_to_multiple_of=__A,return_attention_mask=__A,)
# Load from model defaults
if return_attention_mask is None:
_lowerCamelCase : Any = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_lowerCamelCase : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_lowerCamelCase : Optional[Any] = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
_lowerCamelCase : str = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_lowerCamelCase : Tuple = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
_lowerCamelCase : int = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs | 44 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def UpperCamelCase__ ( ) -> None:
print('Making key files...' )
make_key_files('rsa' , 1024 )
print('Key files generation successful.' )
def UpperCamelCase__ ( A__ ) -> tuple[tuple[int, int], tuple[int, int]]:
print('Generating prime p...' )
snake_case__ : Dict = rabinMiller.generate_large_prime(A__ )
print('Generating prime q...' )
snake_case__ : Optional[Any] = rabinMiller.generate_large_prime(A__ )
snake_case__ : Dict = p * q
print('Generating e that is relatively prime to (p - 1) * (q - 1)...' )
while True:
snake_case__ : Optional[Any] = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(A__ , (p - 1) * (q - 1) ) == 1:
break
print('Calculating d that is mod inverse of e...' )
snake_case__ : int = cryptoMath.find_mod_inverse(A__ , (p - 1) * (q - 1) )
snake_case__ : Optional[Any] = (n, e)
snake_case__ : List[Any] = (n, d)
return (public_key, private_key)
def UpperCamelCase__ ( A__ , A__ ) -> None:
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print('\nWARNING:' )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'Use a different name or delete these files and re-run this program.' )
sys.exit()
snake_case__ , snake_case__ : List[str] = generate_key(A__ )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , 'w' ) as out_file:
out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , 'w' ) as out_file:
out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
| 699 | import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowerCAmelCase__ : Optional[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def UpperCamelCase__ ( A__ , A__ , A__ ) -> List[str]:
snake_case__ : int = state_dict.pop(A__ )
snake_case__ : Union[str, Any] = val
def UpperCamelCase__ ( A__ ) -> int:
snake_case__ : List[Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
snake_case__ : Any = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
snake_case__ : Optional[int] = value
else:
snake_case__ : Optional[int] = value
return new_state_dict
def UpperCamelCase__ ( A__ , A__=False ) -> Optional[int]:
snake_case__ : Optional[int] = ''
if is_panoptic:
snake_case__ : Tuple = 'conditional_detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case__ : int = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
snake_case__ : str = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Union[str, Any] = in_proj_weight[:256, :]
snake_case__ : Union[str, Any] = in_proj_bias[:256]
snake_case__ : Union[str, Any] = in_proj_weight[256:512, :]
snake_case__ : Optional[Any] = in_proj_bias[256:512]
snake_case__ : List[str] = in_proj_weight[-256:, :]
snake_case__ : Tuple = in_proj_bias[-256:]
def UpperCamelCase__ ( ) -> Tuple:
snake_case__ : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case__ : str = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase__ ( A__ , A__ ) -> str:
snake_case__ : List[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
snake_case__ : Any = 'resnet101'
if "dc5" in model_name:
snake_case__ : Any = True
snake_case__ : int = 'panoptic' in model_name
if is_panoptic:
snake_case__ : str = 250
else:
snake_case__ : Union[str, Any] = 91
snake_case__ : Optional[int] = 'huggingface/label-files'
snake_case__ : Optional[Any] = 'coco-detection-id2label.json'
snake_case__ : str = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) )
snake_case__ : List[Any] = {int(A__ ): v for k, v in idalabel.items()}
snake_case__ : Any = idalabel
snake_case__ : int = {v: k for k, v in idalabel.items()}
# load image processor
snake_case__ : List[Any] = 'coco_panoptic' if is_panoptic else 'coco_detection'
snake_case__ : List[Any] = ConditionalDetrImageProcessor(format=A__ )
# prepare image
snake_case__ : List[str] = prepare_img()
snake_case__ : Any = image_processor(images=A__ , return_tensors='pt' )
snake_case__ : Dict = encoding['pixel_values']
logger.info(F"""Converting model {model_name}...""" )
# load original model from torch hub
snake_case__ : Any = torch.hub.load('DeppMeng/ConditionalDETR' , A__ , pretrained=A__ ).eval()
snake_case__ : Tuple = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
snake_case__ : List[Any] = 'conditional_detr.' + src
rename_key(A__ , A__ , A__ )
snake_case__ : Dict = rename_backbone_keys(A__ )
# query, key and value matrices need special treatment
read_in_q_k_v(A__ , is_panoptic=A__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case__ : Optional[int] = 'conditional_detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('conditional_detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
snake_case__ : List[Any] = state_dict.pop(A__ )
snake_case__ : Optional[int] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case__ : str = state_dict.pop(A__ )
snake_case__ : List[Any] = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
snake_case__ : Union[str, Any] = state_dict.pop(A__ )
snake_case__ : Dict = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
snake_case__ : List[Any] = state_dict.pop(A__ )
snake_case__ : Optional[int] = val
# finally, create HuggingFace model and load state dict
snake_case__ : Union[str, Any] = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ )
model.load_state_dict(A__ )
model.eval()
model.push_to_hub(repo_id=A__ , organization='DepuMeng' , commit_message='Add model' )
# verify our conversion
snake_case__ : Tuple = conditional_detr(A__ )
snake_case__ : str = model(A__ )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
lowerCAmelCase__ : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 699 | 1 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def snake_case_ ( __snake_case : List[Any]) -> Optional[int]:
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name)
A_ : List[Any] ='''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class __UpperCAmelCase ( __a ):
@staticmethod
def UpperCAmelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=snake_case__ , required=snake_case__ , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=snake_case__ , required=snake_case__ , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=snake_case__ , required=snake_case__ , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=snake_case__ , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=snake_case__ , default=snake_case__ , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=snake_case__ )
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , ):
lowerCAmelCase_ = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(F'''Loading model {model_type}''' )
lowerCAmelCase_ = model_type
lowerCAmelCase_ = tf_checkpoint
lowerCAmelCase_ = pytorch_dump_output
lowerCAmelCase_ = config
lowerCAmelCase_ = finetuning_task_name
def UpperCAmelCase_ ( self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(snake_case__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
if "ckpt" in self._tf_checkpoint.lower():
lowerCAmelCase_ = self._tf_checkpoint
lowerCAmelCase_ = ""
else:
lowerCAmelCase_ = self._tf_checkpoint
lowerCAmelCase_ = ""
convert_transfo_xl_checkpoint_to_pytorch(
snake_case__ , self._config , self._pytorch_dump_output , snake_case__ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
| 274 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["""DeiTFeatureExtractor"""]
SCREAMING_SNAKE_CASE = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 199 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Any = logging.get_logger(__name__)
class lowercase_ ( a__ ):
__UpperCAmelCase = 'encoder-decoder'
__UpperCAmelCase = True
def __init__( self , **a ):
super().__init__(**a )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
UpperCamelCase__ = kwargs.pop("encoder" )
UpperCamelCase__ = encoder_config.pop("model_type" )
UpperCamelCase__ = kwargs.pop("decoder" )
UpperCamelCase__ = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
UpperCamelCase__ = AutoConfig.for_model(a , **a )
UpperCamelCase__ = AutoConfig.for_model(a , **a )
UpperCamelCase__ = True
@classmethod
def __a ( cls , a , a , **a ):
logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
UpperCamelCase__ = True
UpperCamelCase__ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **a )
def __a ( self ):
UpperCamelCase__ = copy.deepcopy(self.__dict__ )
UpperCamelCase__ = self.encoder.to_dict()
UpperCamelCase__ = self.decoder.to_dict()
UpperCamelCase__ = self.__class__.model_type
return output
| 223 |
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a__ : str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a__ : Union[str, Any] = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
a__ : int = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a__ : Optional[int] = 'facebook'
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a__ : Any = 'allenai'
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = dict((re.sub(R"@@$" , "" , __A ), v) if k.endswith("@@" ) else (re.sub(R"$" , "</w>" , __A ), v) for k, v in d.items() )
UpperCamelCase__ = "<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[F'''{k}</w>''']
UpperCamelCase__ = d[k] # restore
return da
def _UpperCamelCase ( __A , __A ) -> Tuple:
'''simple docstring'''
assert os.path.exists(__A )
os.makedirs(__A , exist_ok=__A )
print(F'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
UpperCamelCase__ = basename(__A )
UpperCamelCase__ = dirname(__A )
UpperCamelCase__ = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
UpperCamelCase__ = cls.hub_models()
UpperCamelCase__ = {"bpe": "fastbpe", "tokenizer": "moses"}
UpperCamelCase__ = "."
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'''using checkpoint {checkpoint_file}''' )
UpperCamelCase__ = hub_utils.from_pretrained(
__A , __A , __A , archive_map=__A , **__A )
UpperCamelCase__ = vars(chkpt["args"]["model"] )
UpperCamelCase__ = args["source_lang"]
UpperCamelCase__ = args["target_lang"]
UpperCamelCase__ = dirname(__A )
UpperCamelCase__ = basename(__A )
# dicts
UpperCamelCase__ = os.path.join(__A , F'''dict.{src_lang}.txt''' )
UpperCamelCase__ = os.path.join(__A , F'''dict.{tgt_lang}.txt''' )
UpperCamelCase__ = Dictionary.load(__A )
UpperCamelCase__ = rewrite_dict_keys(src_dict.indices )
UpperCamelCase__ = len(__A )
UpperCamelCase__ = os.path.join(__A , "vocab-src.json" )
print(F'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
UpperCamelCase__ = True
for k in src_vocab.keys():
if not k.islower():
UpperCamelCase__ = False
break
UpperCamelCase__ = Dictionary.load(__A )
UpperCamelCase__ = rewrite_dict_keys(tgt_dict.indices )
UpperCamelCase__ = len(__A )
UpperCamelCase__ = os.path.join(__A , "vocab-tgt.json" )
print(F'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) )
# merges_file (bpecodes)
UpperCamelCase__ = os.path.join(__A , VOCAB_FILES_NAMES["merges_file"] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
UpperCamelCase__ = os.path.join(__A , __A )
if os.path.exists(__A ):
break
with open(__A , encoding="utf-8" ) as fin:
UpperCamelCase__ = fin.read()
UpperCamelCase__ = re.sub(R" \d+$" , "" , __A , 0 , re.M ) # remove frequency number
print(F'''Generating {merges_file}''' )
with open(__A , "w" , encoding="utf-8" ) as fout:
fout.write(__A )
# model config
UpperCamelCase__ = os.path.join(__A , "config.json" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'''need to extend tokenizer to support bpe={args['bpe']}'''
assert args["tokenizer"] == "moses", F'''need to extend tokenizer to support bpe={args['tokenizer']}'''
UpperCamelCase__ = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.02,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
# good hparam defaults to start with
UpperCamelCase__ = 5
UpperCamelCase__ = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
UpperCamelCase__ = best_score_hparams[model_dir]["length_penalty"]
else:
UpperCamelCase__ = 1.0
print(F'''Generating {fsmt_model_config_file}''' )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) )
# tokenizer config
UpperCamelCase__ = os.path.join(__A , __A )
UpperCamelCase__ = {
"langs": [src_lang, tgt_lang],
"model_max_length": 1024,
"do_lower_case": do_lower_case,
}
print(F'''Generating {fsmt_tokenizer_config_file}''' )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) )
# model
UpperCamelCase__ = chkpt["models"][0]
UpperCamelCase__ = model.state_dict()
# rename keys to start with 'model.'
UpperCamelCase__ = OrderedDict(("model." + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
UpperCamelCase__ = [
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
model_state_dict.pop(__A , __A )
UpperCamelCase__ = FSMTConfig.from_pretrained(__A )
UpperCamelCase__ = FSMTForConditionalGeneration(__A )
# check that it loads ok
model_new.load_state_dict(__A , strict=__A )
# save
UpperCamelCase__ = os.path.join(__A , __A )
print(F'''Generating {pytorch_weights_dump_path}''' )
torch.save(__A , __A )
print("Conversion is done!" )
print("\nLast step is to upload the files to s3" )
print(F'''cd {data_root}''' )
print(F'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
a__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a__ : Optional[int] = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 223 | 1 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __UpperCamelCase ( lowercase__ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCAmelCase_ : List[Any] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
lowerCAmelCase_ : Optional[int] = 4
lowerCAmelCase_ : Optional[int] = 48
lowerCAmelCase_ : List[str] = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCAmelCase_ : Optional[int] = [6, 6, 6, 6]
lowerCAmelCase_ : Tuple = 60
lowerCAmelCase_ : int = [6, 6, 6, 6]
lowerCAmelCase_ : Optional[Any] = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCAmelCase_ : List[str] = 4
lowerCAmelCase_ : Optional[int] = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
lowerCAmelCase_ : Tuple = 1
lowerCAmelCase_ : Union[str, Any] = 1
lowerCAmelCase_ : Optional[Any] = 126
lowerCAmelCase_ : Dict = 7
lowerCAmelCase_ : List[str] = 255.0
lowerCAmelCase_ : int = ''
return config
def __UpperCamelCase ( lowercase__ : int , lowercase__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
lowerCAmelCase_ : Tuple = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowerCAmelCase_ : Dict = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
if "layers" in name:
lowerCAmelCase_ : List[Any] = name.replace("""layers""" , """encoder.stages""" )
if "residual_group.blocks" in name:
lowerCAmelCase_ : Optional[int] = name.replace("""residual_group.blocks""" , """layers""" )
if "attn.proj" in name:
lowerCAmelCase_ : List[str] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowerCAmelCase_ : Optional[int] = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowerCAmelCase_ : List[str] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowerCAmelCase_ : str = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowerCAmelCase_ : Any = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCAmelCase_ : Optional[int] = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
lowerCAmelCase_ : int = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
lowerCAmelCase_ : List[str] = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
lowerCAmelCase_ : Union[str, Any] = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
lowerCAmelCase_ : Any = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if "patch_embed.proj" in name:
lowerCAmelCase_ : Optional[int] = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
if name == "norm.weight":
lowerCAmelCase_ : Tuple = 'layernorm.weight'
if name == "norm.bias":
lowerCAmelCase_ : List[Any] = 'layernorm.bias'
if "conv_first" in name:
lowerCAmelCase_ : List[str] = name.replace("""conv_first""" , """first_convolution""" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
lowerCAmelCase_ : Optional[Any] = name.replace("""conv_last""" , """final_convolution""" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
lowerCAmelCase_ : int = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
if "upsample.0" in name:
lowerCAmelCase_ : Tuple = name.replace("""upsample.0""" , """upsample.convolution_0""" )
if "upsample.2" in name:
lowerCAmelCase_ : Optional[Any] = name.replace("""upsample.2""" , """upsample.convolution_1""" )
lowerCAmelCase_ : Optional[int] = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
lowerCAmelCase_ : List[Any] = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
lowerCAmelCase_ : Tuple = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
else:
pass
else:
lowerCAmelCase_ : int = 'swin2sr.' + name
return name
def __UpperCamelCase ( lowercase__ : Optional[Any] , lowercase__ : int ) -> str:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ : List[Any] = orig_state_dict.pop(__lowerCAmelCase )
if "qkv" in key:
lowerCAmelCase_ : Optional[int] = key.split(""".""" )
lowerCAmelCase_ : Union[str, Any] = int(key_split[1] )
lowerCAmelCase_ : Tuple = int(key_split[4] )
lowerCAmelCase_ : Optional[int] = config.embed_dim
if "weight" in key:
lowerCAmelCase_ : List[Any] = val[:dim, :]
lowerCAmelCase_ : Union[str, Any] = val[dim : dim * 2, :]
lowerCAmelCase_ : Optional[Any] = val[-dim:, :]
else:
lowerCAmelCase_ : Tuple = val[:dim]
lowerCAmelCase_ : Any = val[dim : dim * 2]
lowerCAmelCase_ : Union[str, Any] = val[-dim:]
pass
else:
lowerCAmelCase_ : str = val
return orig_state_dict
def __UpperCamelCase ( lowercase__ : str , lowercase__ : str , lowercase__ : Union[str, Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Any = get_config(__lowerCAmelCase )
lowerCAmelCase_ : Optional[Any] = SwinaSRForImageSuperResolution(__lowerCAmelCase )
model.eval()
lowerCAmelCase_ : List[Any] = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location="""cpu""" )
lowerCAmelCase_ : Union[str, Any] = convert_state_dict(__lowerCAmelCase , __lowerCAmelCase )
lowerCAmelCase_ : str = model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
raise ValueError("""Missing keys when converting: {}""".format(__lowerCAmelCase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'Unexpected key {key} in state_dict' )
# verify values
lowerCAmelCase_ : int = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
lowerCAmelCase_ : Any = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ).convert("""RGB""" )
lowerCAmelCase_ : int = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
lowerCAmelCase_ : Optional[int] = 126 if 'Jpeg' in checkpoint_url else 256
lowerCAmelCase_ : int = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
lowerCAmelCase_ : Any = transforms(__lowerCAmelCase ).unsqueeze(0 )
if config.num_channels == 1:
lowerCAmelCase_ : Tuple = pixel_values[:, 0, :, :].unsqueeze(1 )
lowerCAmelCase_ : Optional[int] = model(__lowerCAmelCase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
lowerCAmelCase_ : List[Any] = torch.Size([1, 3, 512, 512] )
lowerCAmelCase_ : int = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCAmelCase_ : Optional[int] = torch.Size([1, 3, 1024, 1024] )
lowerCAmelCase_ : Tuple = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
lowerCAmelCase_ : List[str] = torch.Size([1, 3, 1024, 1024] )
lowerCAmelCase_ : Dict = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCAmelCase_ : Optional[Any] = torch.Size([1, 3, 512, 512] )
lowerCAmelCase_ : str = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCAmelCase_ : Optional[int] = torch.Size([1, 3, 1024, 1024] )
lowerCAmelCase_ : Optional[int] = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , __lowerCAmelCase , atol=1E-3 )
print("""Looks ok!""" )
lowerCAmelCase_ : Optional[Any] = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
lowerCAmelCase_ : Tuple = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCAmelCase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
model.push_to_hub(f'caidas/{model_name}' )
processor.push_to_hub(f'caidas/{model_name}' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
__UpperCAmelCase = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 600 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def A__( __lowerCAmelCase ):
_snake_case : Dict = [
'decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def A__( __lowerCAmelCase ):
_snake_case , _snake_case : Any = emb.weight.shape
_snake_case : List[str] = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
_snake_case : Optional[int] = emb.weight.data
return lin_layer
def A__( __lowerCAmelCase ):
_snake_case : List[str] = torch.load(__lowerCAmelCase , map_location='cpu' )
_snake_case : List[Any] = Namespace(**checkpoint['cfg']['model'] )
_snake_case : Optional[Any] = checkpoint['model']
remove_ignore_keys_(__lowerCAmelCase )
_snake_case : List[Any] = state_dict['decoder.embed_tokens.weight'].shape[0]
_snake_case : Dict = {key.replace('decoder' , 'model' ): val for key, val in state_dict.items()}
_snake_case : Optional[Any] = XGLMConfig(
vocab_size=__lowerCAmelCase , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='gelu' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
_snake_case : Optional[Any] = XGLMForCausalLM(__lowerCAmelCase )
_snake_case : Any = model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
print(__lowerCAmelCase )
_snake_case : Optional[Any] = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
lowercase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowercase_ : Optional[Any] = parser.parse_args()
lowercase_ : List[Any] = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 304 | 0 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__A : str = random.Random()
def lowerCAmelCase_ ( a : Dict , a : int=1.0 , a : Optional[int]=None , a : Any=None ):
if rng is None:
a__ = global_rng
a__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=10 , _a=160 , _a=8 , _a=0.0 , _a=4000 , _a=False , _a=True , ):
"""simple docstring"""
a__ = parent
a__ = batch_size
a__ = min_seq_length
a__ = max_seq_length
a__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a__ = padding_value
a__ = sampling_rate
a__ = return_attention_mask
a__ = do_normalize
a__ = feature_size
a__ = chunk_length
a__ = hop_length
def lowercase__ ( self ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase__ ( self , _a=False , _a=False ):
"""simple docstring"""
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
a__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
a__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a__ = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE:Dict = WhisperFeatureExtractor if is_speech_available() else None
def lowercase__ ( self ):
"""simple docstring"""
a__ = WhisperFeatureExtractionTester(self )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
a__ = self.feature_extraction_class.from_pretrained(_a )
a__ = feat_extract_first.to_dict()
a__ = feat_extract_second.to_dict()
a__ = feat_extract_first.mel_filters
a__ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = os.path.join(_a , 'feat_extract.json' )
feat_extract_first.to_json_file(_a )
a__ = self.feature_extraction_class.from_json_file(_a )
a__ = feat_extract_first.to_dict()
a__ = feat_extract_second.to_dict()
a__ = feat_extract_first.mel_filters
a__ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a__ = [np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
a__ = feature_extractor(_a , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
a__ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
a__ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
a__ = feature_extractor(_a , return_tensors='np' ).input_features
a__ = feature_extractor(_a , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
a__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
a__ = np.asarray(_a )
a__ = feature_extractor(_a , return_tensors='np' ).input_features
a__ = feature_extractor(_a , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test truncation required
a__ = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
a__ = [np.asarray(_a ) for speech_input in speech_inputs]
a__ = [x[: feature_extractor.n_samples] for x in speech_inputs]
a__ = [np.asarray(_a ) for speech_input in speech_inputs_truncated]
a__ = feature_extractor(_a , return_tensors='np' ).input_features
a__ = feature_extractor(_a , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def lowercase__ ( self ):
"""simple docstring"""
import torch
a__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ = np.random.rand(100 , 32 ).astype(np.floataa )
a__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a__ = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
a__ = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowercase__ ( self , _a ):
"""simple docstring"""
a__ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
a__ = ds.sort('id' ).select(range(_a ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def lowercase__ ( self ):
"""simple docstring"""
a__ = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
a__ = self._load_datasamples(1 )
a__ = WhisperFeatureExtractor()
a__ = feature_extractor(_a , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _a , atol=1e-4 ) )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ = self._load_datasamples(1 )[0]
a__ = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
a__ = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_a )[0]
self.assertTrue(np.all(np.mean(_a ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a ) - 1 ) < 1e-3 ) )
| 714 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def lowerCAmelCase_ ( a : int="ro" , a : Tuple="en" , a : Union[str, Any]="wmt16" , a : List[Any]=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('run pip install datasets' )
a__ = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
a__ = datasets.load_dataset(a , a )
if save_dir is None:
a__ = f'''{dataset}-{pair}'''
a__ = Path(a )
save_dir.mkdir(exist_ok=a )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
a__ = 'val' if split == 'validation' else split
a__ = save_dir.joinpath(f'''{fn}.source''' )
a__ = save_dir.joinpath(f'''{fn}.target''' )
a__ = src_path.open('w+' )
a__ = tgt_path.open('w+' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
a__ = x['translation']
src_fp.write(ex[src_lang] + '\n' )
tgt_fp.write(ex[tgt_lang] + '\n' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 126 | 0 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( __a , unittest.TestCase ):
"""simple docstring"""
__A = CodeGenTokenizer
__A = CodeGenTokenizerFast
__A = True
__A = {"add_prefix_space": True}
__A = False
def a ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
_lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
_lowerCAmelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_lowerCAmelCase = {'unk_token': '<unk>'}
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__lowerCAmelCase ) )
def a ( self : List[Any] , **__lowerCAmelCase : Any ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def a ( self : List[str] , **__lowerCAmelCase : List[str] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def a ( self : int , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCAmelCase = 'lower newer'
_lowerCAmelCase = 'lower newer'
return input_text, output_text
def a ( self : Optional[Any] ):
"""simple docstring"""
_lowerCAmelCase = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase = 'lower newer'
_lowerCAmelCase = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
_lowerCAmelCase = tokenizer.tokenize(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = tokens + [tokenizer.unk_token]
_lowerCAmelCase = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def a ( self : Optional[int] ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_rust_tokenizer(add_prefix_space=__lowerCAmelCase )
_lowerCAmelCase = 'lower newer'
# Testing tokenization
_lowerCAmelCase = tokenizer.tokenize(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
_lowerCAmelCase = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids without special tokens
_lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
_lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids with special tokens
_lowerCAmelCase = self.get_rust_tokenizer(add_prefix_space=__lowerCAmelCase )
_lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
_lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing the unknown token
_lowerCAmelCase = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def a ( self : Union[str, Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : int ):
"""simple docstring"""
pass
def a ( self : Optional[int] , __lowerCAmelCase : int=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
# Simple input
_lowerCAmelCase = 'This is a simple input'
_lowerCAmelCase = ['This is a simple input 1', 'This is a simple input 2']
_lowerCAmelCase = ('This is a simple input', 'This is a pair')
_lowerCAmelCase = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='max_length' , )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='max_length' , )
def a ( self : str ):
"""simple docstring"""
_lowerCAmelCase = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
_lowerCAmelCase = 'This is a simple input'
_lowerCAmelCase = ['This is a simple input looooooooong', 'This is a simple input']
_lowerCAmelCase = ('This is a simple input', 'This is a pair')
_lowerCAmelCase = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
_lowerCAmelCase = tokenizer.pad_token_id
_lowerCAmelCase = tokenizer(__lowerCAmelCase , padding='max_length' , max_length=30 , return_tensors='np' )
_lowerCAmelCase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncate=__lowerCAmelCase , return_tensors='np' )
_lowerCAmelCase = tokenizer(*__lowerCAmelCase , padding='max_length' , max_length=60 , return_tensors='np' )
_lowerCAmelCase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncate=__lowerCAmelCase , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def a ( self : int ):
"""simple docstring"""
_lowerCAmelCase = '$$$'
_lowerCAmelCase = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__lowerCAmelCase , add_bos_token=__lowerCAmelCase )
_lowerCAmelCase = 'This is a simple input'
_lowerCAmelCase = ['This is a simple input 1', 'This is a simple input 2']
_lowerCAmelCase = tokenizer.bos_token_id
_lowerCAmelCase = tokenizer(__lowerCAmelCase )
_lowerCAmelCase = tokenizer(__lowerCAmelCase )
self.assertEqual(out_s.input_ids[0] , __lowerCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __lowerCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def a ( self : int ):
"""simple docstring"""
_lowerCAmelCase = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
_lowerCAmelCase = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
_lowerCAmelCase = '\nif len_a > len_b: result = a\nelse: result = b'
_lowerCAmelCase = tokenizer.encode(__lowerCAmelCase )
_lowerCAmelCase = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
_lowerCAmelCase = tokenizer.decode(__lowerCAmelCase , truncate_before_pattern=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def a ( self : Tuple ):
"""simple docstring"""
pass
| 309 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case = logging.get_logger(__name__)
snake_case = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __a , __a ):
"""simple docstring"""
__A = "bit"
__A = ["preactivation", "bottleneck"]
__A = ["SAME", "VALID"]
def __init__( self : str , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Tuple=64 , __lowerCAmelCase : Optional[int]=[256, 512, 1024, 2048] , __lowerCAmelCase : str=[3, 4, 6, 3] , __lowerCAmelCase : int="preactivation" , __lowerCAmelCase : int="relu" , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=32 , __lowerCAmelCase : List[Any]=0.0 , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : Union[str, Any]=32 , __lowerCAmelCase : Tuple=1 , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : int=None , **__lowerCAmelCase : List[Any] , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
if layer_type not in self.layer_types:
raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_lowerCAmelCase = global_padding.upper()
else:
raise ValueError(F"Padding strategy {global_padding} not supported" )
_lowerCAmelCase = num_channels
_lowerCAmelCase = embedding_size
_lowerCAmelCase = hidden_sizes
_lowerCAmelCase = depths
_lowerCAmelCase = layer_type
_lowerCAmelCase = hidden_act
_lowerCAmelCase = global_padding
_lowerCAmelCase = num_groups
_lowerCAmelCase = drop_path_rate
_lowerCAmelCase = embedding_dynamic_padding
_lowerCAmelCase = output_stride
_lowerCAmelCase = width_factor
_lowerCAmelCase = ['stem'] + [F"stage{idx}" for idx in range(1 , len(__lowerCAmelCase ) + 1 )]
_lowerCAmelCase , _lowerCAmelCase = get_aligned_output_features_output_indices(
out_features=__lowerCAmelCase , out_indices=__lowerCAmelCase , stage_names=self.stage_names )
| 309 | 1 |
'''simple docstring'''
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
_lowerCAmelCase = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
_lowerCAmelCase = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
_lowerCAmelCase = "zero2"
_lowerCAmelCase = "zero3"
_lowerCAmelCase = [ZEROa, ZEROa]
def UpperCamelCase ( a , a , a ) -> Dict:
'''simple docstring'''
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
__magic_name__ = parameterized.to_safe_name('''_'''.join(str(a ) for x in param.args ) )
return F'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
_lowerCAmelCase = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( __a ):
@parameterized.expand(a__ , name_func=a__ )
def snake_case__ ( self : Optional[Any] , a__ : str , a__ : Tuple ):
self.run_and_check(
stage=a__ , model=a__ , distributed=a__ , fpaa=a__ , )
@require_torch_multi_gpu
@parameterized.expand(a__ , name_func=a__ )
def snake_case__ ( self : int , a__ : Any , a__ : Dict ):
self.run_and_check(
stage=a__ , model=a__ , distributed=a__ , fpaa=a__ , )
@parameterized.expand(a__ , name_func=a__ )
def snake_case__ ( self : Union[str, Any] , a__ : List[str] , a__ : Optional[Any] ):
self.run_and_check(
stage=a__ , model=a__ , distributed=a__ , fpaa=a__ , )
@require_torch_multi_gpu
@parameterized.expand(a__ , name_func=a__ )
def snake_case__ ( self : Optional[int] , a__ : Union[str, Any] , a__ : List[str] ):
self.run_and_check(
stage=a__ , model=a__ , distributed=a__ , fpaa=a__ , )
def snake_case__ ( self : List[Any] , a__ : List[str] ):
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def snake_case__ ( self : int , a__ : str , a__ : str , a__ : int = 10 , a__ : bool = True , a__ : bool = True , a__ : bool = True , ):
__magic_name__ = models[model]
__magic_name__ = self.run_trainer(
stage=a__ , model_name=a__ , eval_steps=a__ , num_train_epochs=1 , distributed=a__ , fpaa=a__ , )
self.do_checks(a__ )
return output_dir
def snake_case__ ( self : List[Any] , a__ : str , a__ : str , a__ : int = 10 , a__ : int = 1 , a__ : bool = True , a__ : bool = True , ):
__magic_name__ = self.get_auto_remove_tmp_dir('''./xxx''' , after=a__ )
__magic_name__ = F'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(a__ )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(['''--fp16'''] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__magic_name__ = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
__magic_name__ = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
__magic_name__ = self.get_launcher(a__ )
__magic_name__ = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(a__ , env=self.get_env() )
return output_dir
def snake_case__ ( self : Dict , a__ : Any=False ):
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
__magic_name__ = min(2 , get_gpu_count() ) if distributed else 1
return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
| 245 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def UpperCamelCase ( a ) -> int:
'''simple docstring'''
if isinstance(a , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class _SCREAMING_SNAKE_CASE :
def snake_case__ ( self : List[Any] , a__ : Optional[int] , a__ : List[str] ):
pass
def snake_case__ ( self : Dict ):
pass
def snake_case__ ( self : Optional[int] ):
pass
def snake_case__ ( self : List[Any] , a__ : List[Any] , a__ : List[Any] , a__ : int , a__ : Dict , a__ : str=None , **a__ : Dict ):
__magic_name__ = VisionTextDualEncoderConfig.from_vision_text_configs(a__ , a__ )
__magic_name__ = TFVisionTextDualEncoderModel(a__ )
__magic_name__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def snake_case__ ( self : Any , a__ : Tuple , a__ : List[Any] , a__ : Union[str, Any] , a__ : Any , a__ : Dict=None , **a__ : Optional[Any] ):
__magic_name__ , __magic_name__ = self.get_vision_text_model(a__ , a__ )
__magic_name__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__ )
__magic_name__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case__ ( self : str , a__ : Tuple , a__ : Optional[Any] , a__ : int , a__ : Optional[int] , a__ : int=None , **a__ : int ):
__magic_name__ , __magic_name__ = self.get_vision_text_model(a__ , a__ )
__magic_name__ = {'''vision_model''': vision_model, '''text_model''': text_model}
__magic_name__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a__ )
__magic_name__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case__ ( self : str , a__ : List[Any] , a__ : Optional[int] , a__ : int , a__ : Union[str, Any] , a__ : Tuple=None , **a__ : Union[str, Any] ):
__magic_name__ , __magic_name__ = self.get_vision_text_model(a__ , a__ )
__magic_name__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__ )
__magic_name__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
__magic_name__ = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a__ )
__magic_name__ = TFVisionTextDualEncoderModel.from_pretrained(a__ )
__magic_name__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
__magic_name__ = after_output[0].numpy()
__magic_name__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a__ , 1E-5 )
def snake_case__ ( self : str , a__ : Any , a__ : Optional[int] , a__ : List[str] , a__ : Tuple , a__ : Optional[Any]=None , **a__ : int ):
__magic_name__ , __magic_name__ = self.get_vision_text_model(a__ , a__ )
__magic_name__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__ )
__magic_name__ = model(
input_ids=a__ , pixel_values=a__ , attention_mask=a__ , output_attentions=a__ )
__magic_name__ = output.vision_model_output.attentions
self.assertEqual(len(a__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__magic_name__ = to_atuple(vision_model.config.image_size )
__magic_name__ = to_atuple(vision_model.config.patch_size )
__magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__magic_name__ = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__magic_name__ = output.text_model_output.attentions
self.assertEqual(len(a__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case__ ( self : Optional[Any] , a__ : np.ndarray , a__ : np.ndarray , a__ : float ):
__magic_name__ = np.abs((a - b) ).max()
self.assertLessEqual(a__ , a__ , F'''Difference between torch and flax is {diff} (>= {tol}).''' )
def snake_case__ ( self : Tuple ):
__magic_name__ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**a__ )
def snake_case__ ( self : Optional[Any] ):
__magic_name__ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a__ )
def snake_case__ ( self : int ):
__magic_name__ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a__ )
def snake_case__ ( self : Optional[Any] ):
__magic_name__ = self.prepare_config_and_inputs()
self.check_save_load(**a__ )
def snake_case__ ( self : Tuple ):
__magic_name__ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a__ )
@slow
def snake_case__ ( self : int ):
__magic_name__ , __magic_name__ = self.get_pretrained_model_and_inputs()
__magic_name__ = model_a(**a__ )
__magic_name__ = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(a__ )
__magic_name__ = TFVisionTextDualEncoderModel.from_pretrained(a__ )
__magic_name__ = model_a(**a__ )
__magic_name__ = after_outputs[0].numpy()
__magic_name__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a__ , 1E-5 )
@require_tf
class _SCREAMING_SNAKE_CASE ( __a ,unittest.TestCase ):
def snake_case__ ( self : int ):
__magic_name__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''' )
__magic_name__ = 13
__magic_name__ = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__magic_name__ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__magic_name__ = random_attention_mask([batch_size, 4] )
__magic_name__ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case__ ( self : Union[str, Any] , a__ : Any , a__ : List[Any] ):
__magic_name__ = TFViTModel(a__ , name='''vision_model''' )
__magic_name__ = TFBertModel(a__ , name='''text_model''' )
return vision_model, text_model
def snake_case__ ( self : List[str] ):
__magic_name__ = TFViTModelTester(self )
__magic_name__ = TFBertModelTester(self )
__magic_name__ = vit_model_tester.prepare_config_and_inputs()
__magic_name__ = bert_model_tester.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ = vision_config_and_inputs
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _SCREAMING_SNAKE_CASE ( __a ,unittest.TestCase ):
def snake_case__ ( self : Tuple ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
__magic_name__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''' )
__magic_name__ = 13
__magic_name__ = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__magic_name__ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__magic_name__ = random_attention_mask([batch_size, 4] )
__magic_name__ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case__ ( self : Dict , a__ : Any , a__ : Tuple , a__ : str , a__ : Any , a__ : Union[str, Any]=None , **a__ : List[str] ):
__magic_name__ , __magic_name__ = self.get_vision_text_model(a__ , a__ )
__magic_name__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__ )
__magic_name__ = model(
input_ids=a__ , pixel_values=a__ , attention_mask=a__ , output_attentions=a__ )
__magic_name__ = output.vision_model_output.attentions
self.assertEqual(len(a__ ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__magic_name__ = to_atuple(vision_model.config.image_size )
__magic_name__ = to_atuple(vision_model.config.patch_size )
__magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__magic_name__ = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__magic_name__ = output.text_model_output.attentions
self.assertEqual(len(a__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case__ ( self : str , a__ : int , a__ : Any ):
__magic_name__ = TFDeiTModel(a__ , name='''vision_model''' )
__magic_name__ = TFRobertaModel(a__ , name='''text_model''' )
return vision_model, text_model
def snake_case__ ( self : Dict ):
__magic_name__ = TFDeiTModelTester(self )
__magic_name__ = TFRobertaModelTester(self )
__magic_name__ = vit_model_tester.prepare_config_and_inputs()
__magic_name__ = bert_model_tester.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ = vision_config_and_inputs
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _SCREAMING_SNAKE_CASE ( __a ,unittest.TestCase ):
def snake_case__ ( self : List[str] ):
__magic_name__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''' )
__magic_name__ = 13
__magic_name__ = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__magic_name__ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__magic_name__ = random_attention_mask([batch_size, 4] )
__magic_name__ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case__ ( self : int , a__ : int , a__ : Dict ):
__magic_name__ = TFCLIPVisionModel(a__ , name='''vision_model''' )
__magic_name__ = TFBertModel(a__ , name='''text_model''' )
return vision_model, text_model
def snake_case__ ( self : str ):
__magic_name__ = TFCLIPVisionModelTester(self )
__magic_name__ = TFBertModelTester(self )
__magic_name__ = clip_model_tester.prepare_config_and_inputs()
__magic_name__ = bert_model_tester.prepare_config_and_inputs()
__magic_name__ , __magic_name__ = vision_config_and_inputs
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def snake_case__ ( self : Union[str, Any] ):
__magic_name__ = TFVisionTextDualEncoderModel.from_pretrained(
'''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=a__ )
__magic_name__ = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
__magic_name__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__magic_name__ = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=a__ , padding=a__ , return_tensors='''np''' )
__magic_name__ = model(**a__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__magic_name__ = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a__ , atol=1E-3 ) )
| 245 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( _a ):
snake_case : Union[str, Any] = """deta"""
snake_case : Any = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=900 , __lowerCAmelCase=2048 , __lowerCAmelCase=6 , __lowerCAmelCase=2048 , __lowerCAmelCase=8 , __lowerCAmelCase=6 , __lowerCAmelCase=1024 , __lowerCAmelCase=8 , __lowerCAmelCase=0.0 , __lowerCAmelCase=True , __lowerCAmelCase="relu" , __lowerCAmelCase=256 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1.0 , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase="sine" , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=4 , __lowerCAmelCase=True , __lowerCAmelCase=300 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=1 , __lowerCAmelCase=5 , __lowerCAmelCase=2 , __lowerCAmelCase=1 , __lowerCAmelCase=1 , __lowerCAmelCase=5 , __lowerCAmelCase=2 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.25 , **__lowerCAmelCase , ):
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
UpperCamelCase__ = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = backbone_config.pop("""model_type""" )
UpperCamelCase__ = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase__ = config_class.from_dict(__lowerCAmelCase )
UpperCamelCase__ = backbone_config
UpperCamelCase__ = num_queries
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = d_model
UpperCamelCase__ = encoder_ffn_dim
UpperCamelCase__ = encoder_layers
UpperCamelCase__ = encoder_attention_heads
UpperCamelCase__ = decoder_ffn_dim
UpperCamelCase__ = decoder_layers
UpperCamelCase__ = decoder_attention_heads
UpperCamelCase__ = dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = activation_dropout
UpperCamelCase__ = activation_function
UpperCamelCase__ = init_std
UpperCamelCase__ = init_xavier_std
UpperCamelCase__ = encoder_layerdrop
UpperCamelCase__ = auxiliary_loss
UpperCamelCase__ = position_embedding_type
# deformable attributes
UpperCamelCase__ = num_feature_levels
UpperCamelCase__ = encoder_n_points
UpperCamelCase__ = decoder_n_points
UpperCamelCase__ = two_stage
UpperCamelCase__ = two_stage_num_proposals
UpperCamelCase__ = with_box_refine
UpperCamelCase__ = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
UpperCamelCase__ = class_cost
UpperCamelCase__ = bbox_cost
UpperCamelCase__ = giou_cost
# Loss coefficients
UpperCamelCase__ = mask_loss_coefficient
UpperCamelCase__ = dice_loss_coefficient
UpperCamelCase__ = bbox_loss_coefficient
UpperCamelCase__ = giou_loss_coefficient
UpperCamelCase__ = eos_coefficient
UpperCamelCase__ = focal_alpha
super().__init__(is_encoder_decoder=__lowerCAmelCase , **__lowerCAmelCase )
@property
def _lowerCamelCase ( self ):
return self.encoder_attention_heads
@property
def _lowerCamelCase ( self ):
return self.d_model
def _lowerCamelCase ( self ):
UpperCamelCase__ = copy.deepcopy(self.__dict__ )
UpperCamelCase__ = self.backbone_config.to_dict()
UpperCamelCase__ = self.__class__.model_type
return output
| 619 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( _a ):
snake_case : Optional[Any] = """encoder-decoder"""
snake_case : Optional[int] = True
def __init__( self , **__lowerCAmelCase ):
super().__init__(**__lowerCAmelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
UpperCamelCase__ = kwargs.pop("""encoder""" )
UpperCamelCase__ = encoder_config.pop("""model_type""" )
UpperCamelCase__ = kwargs.pop("""decoder""" )
UpperCamelCase__ = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
UpperCamelCase__ = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
UpperCamelCase__ = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
UpperCamelCase__ = True
@classmethod
def _lowerCamelCase ( cls , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ):
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
UpperCamelCase__ = True
UpperCamelCase__ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = copy.deepcopy(self.__dict__ )
UpperCamelCase__ = self.encoder.to_dict()
UpperCamelCase__ = self.decoder.to_dict()
UpperCamelCase__ = self.__class__.model_type
return output
| 619 | 1 |
from bisect import bisect
from itertools import accumulate
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
A_ : int = sorted(zip(_lowerCAmelCase ,_lowerCAmelCase ) ,key=lambda _lowerCAmelCase : x[0] / x[1] ,reverse=_lowerCAmelCase )
A_ : Optional[int] = [i[0] for i in r], [i[1] for i in r]
A_ : Dict = list(accumulate(_lowerCAmelCase ) )
A_ : Tuple = bisect(_lowerCAmelCase ,_lowerCAmelCase )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 |
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,):
'''simple docstring'''
A_ , A_ : int = coefficient_matrix.shape
A_ , A_ : Tuple = constant_matrix.shape
if rowsa != colsa:
A_ : int = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(_lowerCAmelCase )
if colsa != 1:
A_ : List[Any] = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(_lowerCAmelCase )
if rowsa != rowsa:
A_ : str = (
"""Coefficient and constant matrices dimensions must be nxn and nx1 but """
f"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(_lowerCAmelCase )
if len(_lowerCAmelCase ) != rowsa:
A_ : Any = (
"""Number of initial values must be equal to number of rows in coefficient """
f"""matrix but received {len(_lowerCAmelCase )} and {rowsa}"""
)
raise ValueError(_lowerCAmelCase )
if iterations <= 0:
raise ValueError("""Iterations must be at least 1""" )
A_ : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) ,axis=1 )
A_ , A_ : str = table.shape
strictly_diagonally_dominant(_lowerCAmelCase )
# Iterates the whole matrix for given number of times
for _ in range(_lowerCAmelCase ):
A_ : Union[str, Any] = []
for row in range(_lowerCAmelCase ):
A_ : str = 0
for col in range(_lowerCAmelCase ):
if col == row:
A_ : Optional[Any] = table[row][col]
elif col == cols - 1:
A_ : List[str] = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
A_ : str = (temp + val) / denom
new_val.append(_lowerCAmelCase )
A_ : List[str] = new_val
return [float(_lowerCAmelCase ) for i in new_val]
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
A_ , A_ : str = table.shape
A_ : Any = True
for i in range(0 ,_lowerCAmelCase ):
A_ : Optional[Any] = 0
for j in range(0 ,cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 481 | 0 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
lowercase__ : str = _symbol_database.Default()
lowercase__ : Optional[int] = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
lowercase__ : Optional[int] = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
lowercase__ : int = None
lowercase__ : List[Any] = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
lowercase__ : Union[str, Any] = 45
lowercase__ : Optional[Any] = 1581
lowercase__ : Dict = 1517
lowercase__ : Any = 1570
lowercase__ : Dict = 1584
lowercase__ : Any = 1793
lowercase__ : List[str] = 1795
lowercase__ : Union[str, Any] = 1916
lowercase__ : Union[str, Any] = 1864
lowercase__ : str = 1905
lowercase__ : List[Any] = 1919
lowercase__ : Tuple = 2429
lowercase__ : Optional[int] = 2208
lowercase__ : Dict = 2418
lowercase__ : Any = 2323
lowercase__ : Any = 2407
# @@protoc_insertion_point(module_scope)
| 390 | '''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple ):
'''simple docstring'''
UpperCAmelCase_ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
UpperCAmelCase_ = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
UpperCAmelCase_ = F"""{src_lang}-{tgt_lang}"""
UpperCAmelCase_ = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
model_card_dir.mkdir(parents=_UpperCamelCase , exist_ok=_UpperCamelCase )
UpperCAmelCase_ = os.path.join(_UpperCamelCase , '''README.md''' )
print(F"""Generating {path}""" )
with open(_UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(_UpperCamelCase )
# make sure we are under the root of the project
lowercase__ : Dict = Path(__file__).resolve().parent.parent.parent
lowercase__ : Any = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
lowercase__ : str = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 390 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( ) -> list[list[int]]:
return [list(range(1_000 - i , -1_000 - i , -1)) for i in range(1_000)]
lowerCAmelCase__ = generate_large_matrix()
lowerCAmelCase__ = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __UpperCAmelCase ( lowerCamelCase_) -> None:
assert all(row == sorted(lowerCamelCase_ , reverse=lowerCamelCase_) for row in grid)
assert all(list(lowerCamelCase_) == sorted(lowerCamelCase_ , reverse=lowerCamelCase_) for col in zip(*lowerCamelCase_))
def __UpperCAmelCase ( lowerCamelCase_) -> int:
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : List[str] = len(lowerCamelCase_) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
UpperCamelCase__ : int = (left + right) // 2
UpperCamelCase__ : int = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
UpperCamelCase__ : Union[str, Any] = mid + 1
else:
UpperCamelCase__ : int = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> int:
UpperCamelCase__ : Dict = 0
UpperCamelCase__ : Tuple = len(grid[0])
for i in range(len(lowerCamelCase_)):
UpperCamelCase__ : Dict = find_negative_index(grid[i][:bound])
total += bound
return (len(lowerCamelCase_) * len(grid[0])) - total
def __UpperCAmelCase ( lowerCamelCase_) -> int:
return len([number for row in grid for number in row if number < 0])
def __UpperCAmelCase ( lowerCamelCase_) -> int:
UpperCamelCase__ : List[str] = 0
for row in grid:
for i, number in enumerate(lowerCamelCase_):
if number < 0:
total += len(lowerCamelCase_) - i
break
return total
def __UpperCAmelCase ( ) -> None:
from timeit import timeit
print('Running benchmarks')
UpperCamelCase__ : Optional[int] = (
'from __main__ import count_negatives_binary_search, '
'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
UpperCamelCase__ : Any = timeit(f'{func}(grid=grid)' , setup=lowerCamelCase_ , number=500)
print(f'{func}() took {time:0.4f} seconds')
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 6 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Tuple=30 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : List[str]=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Any=0.6 , UpperCAmelCase_ : Dict=None , ):
UpperCamelCase__ : Tuple = parent
UpperCamelCase__ : List[str] = batch_size
UpperCamelCase__ : Optional[Any] = image_size
UpperCamelCase__ : Optional[Any] = patch_size
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : Union[str, Any] = is_training
UpperCamelCase__ : int = use_labels
UpperCamelCase__ : Optional[int] = hidden_size
UpperCamelCase__ : Any = num_hidden_layers
UpperCamelCase__ : str = num_attention_heads
UpperCamelCase__ : str = intermediate_size
UpperCamelCase__ : Union[str, Any] = hidden_act
UpperCamelCase__ : Optional[int] = hidden_dropout_prob
UpperCamelCase__ : Tuple = attention_probs_dropout_prob
UpperCamelCase__ : Any = type_sequence_label_size
UpperCamelCase__ : int = initializer_range
UpperCamelCase__ : Optional[int] = mask_ratio
UpperCamelCase__ : int = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase__ : str = (image_size // patch_size) ** 2
UpperCamelCase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : List[Any]):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Dict = ViTMAEModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple):
UpperCamelCase__ : List[Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Dict = model(UpperCAmelCase_)
UpperCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
UpperCamelCase__ : List[Any] = 1
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = config_and_inputs
UpperCamelCase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_lowerCamelCase = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = ViTMAEModelTester(self)
UpperCamelCase__ : Any = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37)
def __UpperCamelCase ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds')
def __UpperCamelCase ( self : Tuple):
pass
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCamelCase__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__, UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Tuple = model_class(UpperCAmelCase_)
UpperCamelCase__ : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Any = [*signature.parameters.keys()]
UpperCamelCase__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]):
# make masks reproducible
np.random.seed(2)
UpperCamelCase__ : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
UpperCamelCase__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
UpperCamelCase__ : Optional[Any] = torch.from_numpy(UpperCAmelCase_)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ : List[str] = pt_noise
super().check_pt_tf_models(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__, UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
UpperCamelCase__ : Dict = outputs[0].cpu().numpy()
UpperCamelCase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = model_class.from_pretrained(UpperCAmelCase_)
model.to(UpperCAmelCase_)
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
# Make sure we don't have nans
UpperCamelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCamelCase__ : Any = 0
UpperCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(UpperCAmelCase_ , 1e-5)
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Optional[int]):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self : Optional[int]):
pass
@slow
def __UpperCamelCase ( self : Optional[Any]):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : int):
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
def __UpperCamelCase ( self : str):
# make random mask reproducible across the PT and TF model
np.random.seed(2)
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.default_image_processor
UpperCamelCase__ : Dict = prepare_img()
UpperCamelCase__ : Optional[int] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ : Union[str, Any] = ViTMAEConfig()
UpperCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
UpperCamelCase__ : Any = np.random.uniform(size=(1, num_patches))
# forward pass
with torch.no_grad():
UpperCamelCase__ : Dict = model(**UpperCAmelCase_ , noise=torch.from_numpy(UpperCAmelCase_).to(device=UpperCAmelCase_))
# verify the logits
UpperCamelCase__ : Tuple = torch.Size((1, 196, 768))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Any = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCAmelCase_) , atol=1e-4))
| 6 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =ShapEPipeline
__a =['prompt']
__a =['prompt']
__a =[
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
__a =False
@property
def UpperCamelCase__ ( self : str ):
return 32
@property
def UpperCamelCase__ ( self : Any ):
return 32
@property
def UpperCamelCase__ ( self : List[str] ):
return self.time_input_dim * 4
@property
def UpperCamelCase__ ( self : int ):
return 8
@property
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def UpperCamelCase__ ( self : Any ):
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(__a )
@property
def UpperCamelCase__ ( self : int ):
torch.manual_seed(0 )
_a = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
_a = PriorTransformer(**__a )
return model
@property
def UpperCamelCase__ ( self : Optional[int] ):
torch.manual_seed(0 )
_a = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
_a = ShapERenderer(**__a )
return model
def UpperCamelCase__ ( self : Dict ):
_a = self.dummy_prior
_a = self.dummy_text_encoder
_a = self.dummy_tokenizer
_a = self.dummy_renderer
_a = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=10_24 , prediction_type="sample" , use_karras_sigmas=__a , clip_sample=__a , clip_sample_range=1.0 , )
_a = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def UpperCamelCase__ ( self : List[Any] , __a : List[Any] , __a : Optional[Any]=0 ):
if str(__a ).startswith("mps" ):
_a = torch.manual_seed(__a )
else:
_a = torch.Generator(device=__a ).manual_seed(__a )
_a = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def UpperCamelCase__ ( self : List[str] ):
_a = "cpu"
_a = self.get_dummy_components()
_a = self.pipeline_class(**__a )
_a = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
_a = pipe(**self.get_dummy_inputs(__a ) )
_a = output.images[0]
_a = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_a = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase__ ( self : int ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase__ ( self : Tuple ):
_a = torch_device == "cpu"
_a = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__a , relax_max_difference=__a , )
def UpperCamelCase__ ( self : List[str] ):
_a = self.get_dummy_components()
_a = self.pipeline_class(**__a )
_a = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
_a = 1
_a = 2
_a = self.get_dummy_inputs(__a )
for key in inputs.keys():
if key in self.batch_params:
_a = batch_size * [inputs[key]]
_a = pipe(**__a , num_images_per_prompt=__a )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self : Dict ):
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_np_out.npy" )
_a = ShapEPipeline.from_pretrained("openai/shap-e" )
_a = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
_a = torch.Generator(device=__a ).manual_seed(0 )
_a = pipe(
"a shark" , generator=__a , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__a , __a )
| 692 |
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCAmelCase_ : List[Any] = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCAmelCase_ : Dict = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 692 | 1 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def lowercase_ ( _lowerCamelCase: int ) -> Any:
'''simple docstring'''
random.seed(_lowerCamelCase )
np.random.seed(_lowerCamelCase )
torch.manual_seed(_lowerCamelCase )
torch.cuda.manual_seed_all(_lowerCamelCase )
# ^^ safe to call this function even if cuda is not available
class _snake_case :
def __init__( self : List[Any] , UpperCAmelCase : Iterable[torch.nn.Parameter] , UpperCAmelCase : float = 0.9_9_9_9 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : int = 0 , UpperCAmelCase : bool = False , UpperCAmelCase : Union[float, int] = 1.0 , UpperCAmelCase : Union[float, int] = 2 / 3 , UpperCAmelCase : Optional[Any] = None , UpperCAmelCase : Dict[str, Any] = None , **UpperCAmelCase : List[str] , ):
if isinstance(UpperCAmelCase , torch.nn.Module ):
__lowerCamelCase : Union[str, Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , UpperCAmelCase , standard_warn=UpperCAmelCase , )
__lowerCamelCase : Union[str, Any] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
__lowerCamelCase : List[Any] = True
if kwargs.get("max_value" , UpperCAmelCase ) is not None:
__lowerCamelCase : Optional[int] = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , UpperCAmelCase , standard_warn=UpperCAmelCase )
__lowerCamelCase : Optional[int] = kwargs["max_value"]
if kwargs.get("min_value" , UpperCAmelCase ) is not None:
__lowerCamelCase : str = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , UpperCAmelCase , standard_warn=UpperCAmelCase )
__lowerCamelCase : List[Any] = kwargs["min_value"]
__lowerCamelCase : List[str] = list(UpperCAmelCase )
__lowerCamelCase : Dict = [p.clone().detach() for p in parameters]
if kwargs.get("device" , UpperCAmelCase ) is not None:
__lowerCamelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , UpperCAmelCase , standard_warn=UpperCAmelCase )
self.to(device=kwargs["device"] )
__lowerCamelCase : str = None
__lowerCamelCase : Optional[int] = decay
__lowerCamelCase : Optional[Any] = min_decay
__lowerCamelCase : int = update_after_step
__lowerCamelCase : List[str] = use_ema_warmup
__lowerCamelCase : List[Any] = inv_gamma
__lowerCamelCase : Optional[Any] = power
__lowerCamelCase : Tuple = 0
__lowerCamelCase : str = None # set in `step()`
__lowerCamelCase : Tuple = model_cls
__lowerCamelCase : int = model_config
@classmethod
def lowerCamelCase__ ( cls : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] ):
__lowerCamelCase , __lowerCamelCase : List[Any] = model_cls.load_config(UpperCAmelCase , return_unused_kwargs=UpperCAmelCase )
__lowerCamelCase : Dict = model_cls.from_pretrained(UpperCAmelCase )
__lowerCamelCase : List[str] = cls(model.parameters() , model_cls=UpperCAmelCase , model_config=model.config )
ema_model.load_state_dict(UpperCAmelCase )
return ema_model
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : List[Any] ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
__lowerCamelCase : Dict = self.model_cls.from_config(self.model_config )
__lowerCamelCase : Union[str, Any] = self.state_dict()
state_dict.pop("shadow_params" , UpperCAmelCase )
model.register_to_config(**UpperCAmelCase )
self.copy_to(model.parameters() )
model.save_pretrained(UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : int ):
__lowerCamelCase : Tuple = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
__lowerCamelCase : int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
__lowerCamelCase : int = (1 + step) / (10 + step)
__lowerCamelCase : Dict = min(UpperCAmelCase , self.decay )
# make sure decay is not smaller than min_decay
__lowerCamelCase : str = max(UpperCAmelCase , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : Iterable[torch.nn.Parameter] ):
if isinstance(UpperCAmelCase , torch.nn.Module ):
__lowerCamelCase : Dict = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , UpperCAmelCase , standard_warn=UpperCAmelCase , )
__lowerCamelCase : Tuple = parameters.parameters()
__lowerCamelCase : Optional[Any] = list(UpperCAmelCase )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
__lowerCamelCase : List[str] = self.get_decay(self.optimization_step )
__lowerCamelCase : Optional[Any] = decay
__lowerCamelCase : Union[str, Any] = 1 - decay
__lowerCamelCase : Tuple = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , UpperCAmelCase ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
__lowerCamelCase : Union[str, Any] = deepspeed.zero.GatheredParameters(UpperCAmelCase , modifier_rank=UpperCAmelCase )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(UpperCAmelCase )
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : Iterable[torch.nn.Parameter] ):
__lowerCamelCase : Optional[Any] = list(UpperCAmelCase )
for s_param, param in zip(self.shadow_params , UpperCAmelCase ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase__ ( self : str , UpperCAmelCase : str=None , UpperCAmelCase : List[str]=None ):
__lowerCamelCase : Any = [
p.to(device=UpperCAmelCase , dtype=UpperCAmelCase ) if p.is_floating_point() else p.to(device=UpperCAmelCase )
for p in self.shadow_params
]
def lowerCamelCase__ ( self : Union[str, Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : Iterable[torch.nn.Parameter] ):
__lowerCamelCase : int = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , UpperCAmelCase ):
param.data.copy_(c_param.data )
# Better memory-wise.
__lowerCamelCase : List[Any] = None
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : dict ):
__lowerCamelCase : Any = copy.deepcopy(UpperCAmelCase )
__lowerCamelCase : List[str] = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
__lowerCamelCase : str = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , UpperCAmelCase ):
raise ValueError("Invalid min_decay" )
__lowerCamelCase : Union[str, Any] = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , UpperCAmelCase ):
raise ValueError("Invalid optimization_step" )
__lowerCamelCase : Tuple = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , UpperCAmelCase ):
raise ValueError("Invalid update_after_step" )
__lowerCamelCase : List[Any] = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , UpperCAmelCase ):
raise ValueError("Invalid use_ema_warmup" )
__lowerCamelCase : List[Any] = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
__lowerCamelCase : List[str] = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
__lowerCamelCase : Tuple = state_dict.get("shadow_params" , UpperCAmelCase )
if shadow_params is not None:
__lowerCamelCase : int = shadow_params
if not isinstance(self.shadow_params , UpperCAmelCase ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(UpperCAmelCase , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 366 | """simple docstring"""
import gc
import threading
import time
import psutil
import torch
class _snake_case :
def __init__( self : str ):
__lowerCamelCase : Optional[Any] = psutil.Process()
__lowerCamelCase : List[Any] = False
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : List[Any] = -1
while True:
__lowerCamelCase : Union[str, Any] = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Optional[Any] = True
__lowerCamelCase : Union[str, Any] = threading.Thread(target=self.peak_monitor )
__lowerCamelCase : Optional[Any] = True
self.thread.start()
def lowerCamelCase__ ( self : Dict ):
__lowerCamelCase : str = False
self.thread.join()
return self.cpu_memory_peak
__A = PeakCPUMemory()
def lowercase_ ( ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase : Any = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowerCamelCase : Optional[int] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowerCamelCase : int = torch.cuda.memory_allocated(_lowerCamelCase )
torch.cuda.reset_peak_memory_stats()
return measures
def lowercase_ ( _lowerCamelCase: Optional[Any] ) -> Tuple:
'''simple docstring'''
__lowerCamelCase : Tuple = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowerCamelCase : Dict = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
__lowerCamelCase : Tuple = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowerCamelCase : Any = (torch.cuda.memory_allocated(_lowerCamelCase ) - start_measures[str(_lowerCamelCase )]) / 2**20
__lowerCamelCase : Optional[int] = (torch.cuda.max_memory_allocated(_lowerCamelCase ) - start_measures[str(_lowerCamelCase )]) / 2**20
return measures
def lowercase_ ( _lowerCamelCase: int , _lowerCamelCase: List[Any] ) -> Optional[int]:
'''simple docstring'''
print(F"""{description}:""" )
print(F"""- Time: {measures["time"]:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(_lowerCamelCase )]:.2f}MiB""" )
__lowerCamelCase : List[Any] = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""" ) | 366 | 1 |
'''simple docstring'''
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> str:
assert isinstance(UpperCamelCase_ ,UpperCamelCase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('keep_in_memory' ,[False, True] )
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Tuple:
A_ = tmp_path / 'cache'
A_ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ = SqlDatasetReader(
'dataset' ,'sqlite:///' + sqlite_path ,cache_dir=UpperCamelCase_ ,keep_in_memory=UpperCamelCase_ ).read()
_check_sql_dataset(UpperCamelCase_ ,UpperCamelCase_ )
@require_sqlalchemy
@pytest.mark.parametrize(
'features' ,[
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] ,)
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Any:
A_ = tmp_path / 'cache'
A_ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A_ = features.copy() if features else default_expected_features
A_ = (
Features({feature: Value(UpperCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ = SqlDatasetReader('dataset' ,'sqlite:///' + sqlite_path ,features=UpperCamelCase_ ,cache_dir=UpperCamelCase_ ).read()
_check_sql_dataset(UpperCamelCase_ ,UpperCamelCase_ )
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
with contextlib.closing(sqlitea.connect(UpperCamelCase_ ) ) as con:
A_ = con.cursor()
cur.execute('SELECT * FROM dataset' )
for row in cur:
yield row
@require_sqlalchemy
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
A_ = tmp_path / 'cache'
A_ = os.path.join(UpperCamelCase_ ,'tmp.sql' )
A_ = SqlDatasetReader('dataset' ,'sqlite:///' + sqlite_path ,cache_dir=UpperCamelCase_ ).read()
SqlDatasetWriter(UpperCamelCase_ ,'dataset' ,'sqlite:///' + output_sqlite_path ,num_proc=1 ).write()
A_ = iter_sql_file(UpperCamelCase_ )
A_ = iter_sql_file(UpperCamelCase_ )
for rowa, rowa in zip(UpperCamelCase_ ,UpperCamelCase_ ):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> str:
A_ = tmp_path / 'cache'
A_ = os.path.join(UpperCamelCase_ ,'tmp.sql' )
A_ = SqlDatasetReader('dataset' ,'sqlite:///' + sqlite_path ,cache_dir=UpperCamelCase_ ).read()
SqlDatasetWriter(UpperCamelCase_ ,'dataset' ,'sqlite:///' + output_sqlite_path ,num_proc=2 ).write()
A_ = iter_sql_file(UpperCamelCase_ )
A_ = iter_sql_file(UpperCamelCase_ )
for rowa, rowa in zip(UpperCamelCase_ ,UpperCamelCase_ ):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Any:
A_ = tmp_path / 'cache'
A_ = os.path.join(UpperCamelCase_ ,'tmp.sql' )
A_ = SqlDatasetReader('dataset' ,'sqlite:///' + sqlite_path ,cache_dir=UpperCamelCase_ ).read()
with pytest.raises(UpperCamelCase_ ):
SqlDatasetWriter(UpperCamelCase_ ,'dataset' ,'sqlite:///' + output_sqlite_path ,num_proc=0 ).write()
| 366 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
snake_case = 'levit'
def __init__( self : Union[str, Any] , __snake_case : str=224 , __snake_case : Optional[Any]=3 , __snake_case : str=3 , __snake_case : Optional[Any]=2 , __snake_case : List[str]=1 , __snake_case : Tuple=16 , __snake_case : Union[str, Any]=[128, 256, 384] , __snake_case : Optional[Any]=[4, 8, 12] , __snake_case : str=[4, 4, 4] , __snake_case : int=[16, 16, 16] , __snake_case : Union[str, Any]=0 , __snake_case : int=[2, 2, 2] , __snake_case : Dict=[2, 2, 2] , __snake_case : Optional[Any]=0.02 , **__snake_case : Tuple , ) -> Dict:
'''simple docstring'''
super().__init__(**__snake_case )
lowerCamelCase = image_size
lowerCamelCase = num_channels
lowerCamelCase = kernel_size
lowerCamelCase = stride
lowerCamelCase = padding
lowerCamelCase = hidden_sizes
lowerCamelCase = num_attention_heads
lowerCamelCase = depths
lowerCamelCase = key_dim
lowerCamelCase = drop_path_rate
lowerCamelCase = patch_size
lowerCamelCase = attention_ratio
lowerCamelCase = mlp_ratio
lowerCamelCase = initializer_range
lowerCamelCase = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
snake_case = version.parse('1.11' )
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase__ ( self : Optional[int] ) -> float:
'''simple docstring'''
return 1e-4
| 246 | 0 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , ):
"""simple docstring"""
UpperCamelCase : Dict = parent
UpperCamelCase : Dict = 100
UpperCamelCase : Tuple = batch_size
UpperCamelCase : Union[str, Any] = image_size
UpperCamelCase : Tuple = patch_size
UpperCamelCase : Union[str, Any] = num_channels
UpperCamelCase : Dict = is_training
UpperCamelCase : List[Any] = use_labels
UpperCamelCase : Union[str, Any] = hidden_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : Tuple = num_attention_heads
UpperCamelCase : List[Any] = intermediate_size
UpperCamelCase : Dict = hidden_act
UpperCamelCase : str = hidden_dropout_prob
UpperCamelCase : List[Any] = attention_probs_dropout_prob
UpperCamelCase : Optional[int] = type_sequence_label_size
UpperCamelCase : int = initializer_range
UpperCamelCase : str = scope
UpperCamelCase : List[Any] = out_indices
UpperCamelCase : Tuple = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase : List[Any] = (image_size // patch_size) ** 2
UpperCamelCase : List[Any] = num_patches + 1
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : Dict = None
UpperCamelCase : Optional[Any] = None
if self.use_labels:
UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowercase ( self ):
"""simple docstring"""
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = BeitModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Dict = BeitForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = self.type_sequence_label_size
UpperCamelCase : Optional[Any] = BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : str = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase : Any = 1
UpperCamelCase : Optional[int] = BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : Optional[int] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = self.num_labels
UpperCamelCase : Dict = BeitForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
UpperCamelCase : str = config_and_inputs
UpperCamelCase : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a, _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCamelCase : Any = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCamelCase : List[str] = False
__UpperCamelCase : Dict = False
__UpperCamelCase : Tuple = False
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = BeitModelTester(self )
UpperCamelCase : Dict = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : List[Any] = [*signature.parameters.keys()]
UpperCamelCase : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Tuple = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling]:
continue
UpperCamelCase : str = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase : Dict = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase : int = False
UpperCamelCase : Optional[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCamelCase : str = model_class(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.to(__SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase : Optional[int] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
UpperCamelCase : str = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Any = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
UpperCamelCase : Optional[Any] = model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Union[str, Any] = BeitModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def a ( ):
"""simple docstring"""
UpperCamelCase : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
@cached_property
def _lowercase ( self ):
"""simple docstring"""
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = self.default_image_processor
UpperCamelCase : Any = prepare_img()
UpperCamelCase : int = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values.to(__SCREAMING_SNAKE_CASE )
# prepare bool_masked_pos
UpperCamelCase : Tuple = torch.ones((1, 196) , dtype=torch.bool ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase : List[Any] = model(pixel_values=__SCREAMING_SNAKE_CASE , bool_masked_pos=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = outputs.logits
# verify the logits
UpperCamelCase : int = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-2 ) )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = self.default_image_processor
UpperCamelCase : List[str] = prepare_img()
UpperCamelCase : List[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase : Dict = model(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = outputs.logits
# verify the logits
UpperCamelCase : Dict = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
UpperCamelCase : Union[str, Any] = 281
self.assertEqual(logits.argmax(-1 ).item() , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = self.default_image_processor
UpperCamelCase : Dict = prepare_img()
UpperCamelCase : Tuple = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase : Any = model(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = outputs.logits
# verify the logits
UpperCamelCase : Union[str, Any] = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
UpperCamelCase : List[Any] = 2_396
self.assertEqual(logits.argmax(-1 ).item() , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
UpperCamelCase : List[str] = model.to(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = BeitImageProcessor(do_resize=__SCREAMING_SNAKE_CASE , size=640 , do_center_crop=__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
UpperCamelCase : Any = Image.open(ds[0]['''file'''] )
UpperCamelCase : List[str] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase : List[Any] = model(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = outputs.logits
# verify the logits
UpperCamelCase : List[str] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
UpperCamelCase : Optional[Any] = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] , device=__SCREAMING_SNAKE_CASE , )
else:
UpperCamelCase : Any = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] , device=__SCREAMING_SNAKE_CASE , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
UpperCamelCase : str = model.to(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = BeitImageProcessor(do_resize=__SCREAMING_SNAKE_CASE , size=640 , do_center_crop=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
UpperCamelCase : Dict = Image.open(ds[0]['''file'''] )
UpperCamelCase : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase : Optional[Any] = model(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = outputs.logits.detach().cpu()
UpperCamelCase : Dict = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE , target_sizes=[(500, 300)] )
UpperCamelCase : Tuple = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
| 717 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase : Dict = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Any = ["input_features"]
def __init__( self , __SCREAMING_SNAKE_CASE=80 , __SCREAMING_SNAKE_CASE=16_000 , __SCREAMING_SNAKE_CASE=160 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[str] = n_fft
UpperCamelCase : Dict = hop_length
UpperCamelCase : Dict = chunk_length
UpperCamelCase : List[str] = chunk_length * sampling_rate
UpperCamelCase : Dict = self.n_samples // hop_length
UpperCamelCase : str = sampling_rate
UpperCamelCase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[str] = spectrogram(
__SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
UpperCamelCase : int = log_spec[:, :-1]
UpperCamelCase : int = np.maximum(__SCREAMING_SNAKE_CASE , log_spec.max() - 8.0 )
UpperCamelCase : Any = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
UpperCamelCase : List[Any] = np.array(__SCREAMING_SNAKE_CASE , np.intaa )
UpperCamelCase : Optional[Any] = []
for vector, length in zip(__SCREAMING_SNAKE_CASE , attention_mask.sum(-1 ) ):
UpperCamelCase : Optional[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
UpperCamelCase : Optional[int] = padding_value
normed_input_values.append(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : Union[str, Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "max_length" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
UpperCamelCase : Tuple = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
UpperCamelCase : Union[str, Any] = is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase : List[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
UpperCamelCase : int = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase : Optional[int] = [np.asarray([raw_speech] ).T]
UpperCamelCase : Optional[int] = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
UpperCamelCase : Optional[Any] = self.pad(
__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=max_length if max_length else self.n_samples , truncation=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
UpperCamelCase : Optional[Any] = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
UpperCamelCase : List[str] = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
UpperCamelCase : Dict = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
UpperCamelCase : Tuple = [self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE ) for waveform in input_features[0]]
if isinstance(input_features[0] , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[int] = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
else:
UpperCamelCase : Dict = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
UpperCamelCase : Union[str, Any] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
UpperCamelCase : Dict = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE )
return padded_inputs
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCamelCase : List[str] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 643 | 0 |
from collections.abc import Sequence
def __UpperCamelCase ( _A , _A = False ):
if not arr:
return 0
lowerCAmelCase_ = 0 if allow_empty_subarrays else float('''-inf''' )
lowerCAmelCase_ = 0.0
for num in arr:
lowerCAmelCase_ = max(0 if allow_empty_subarrays else num , curr_sum + num )
lowerCAmelCase_ = max(_A , _A )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
_A = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"{max_subarray_sum(nums) = }")
| 431 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
_A = logging.get_logger(__name__)
_A = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
_A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
_A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class A ( __UpperCAmelCase ):
__snake_case = 'whisper'
__snake_case = ['past_key_values']
__snake_case = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self, UpperCamelCase__=5_1865, UpperCamelCase__=80, UpperCamelCase__=6, UpperCamelCase__=4, UpperCamelCase__=6, UpperCamelCase__=4, UpperCamelCase__=1536, UpperCamelCase__=1536, UpperCamelCase__=0.0, UpperCamelCase__=0.0, UpperCamelCase__=5_0257, UpperCamelCase__=True, UpperCamelCase__=True, UpperCamelCase__="gelu", UpperCamelCase__=256, UpperCamelCase__=0.0, UpperCamelCase__=0.0, UpperCamelCase__=0.0, UpperCamelCase__=0.02, UpperCamelCase__=False, UpperCamelCase__=1500, UpperCamelCase__=448, UpperCamelCase__=5_0256, UpperCamelCase__=5_0256, UpperCamelCase__=5_0256, UpperCamelCase__=None, UpperCamelCase__=[220, 5_0256], UpperCamelCase__=False, UpperCamelCase__=256, UpperCamelCase__=False, UpperCamelCase__=0.05, UpperCamelCase__=10, UpperCamelCase__=2, UpperCamelCase__=0.0, UpperCamelCase__=10, UpperCamelCase__=0, UpperCamelCase__=7, **UpperCamelCase__, ):
"""simple docstring"""
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = num_mel_bins
lowerCAmelCase_ = d_model
lowerCAmelCase_ = encoder_layers
lowerCAmelCase_ = encoder_attention_heads
lowerCAmelCase_ = decoder_layers
lowerCAmelCase_ = decoder_attention_heads
lowerCAmelCase_ = decoder_ffn_dim
lowerCAmelCase_ = encoder_ffn_dim
lowerCAmelCase_ = dropout
lowerCAmelCase_ = attention_dropout
lowerCAmelCase_ = activation_dropout
lowerCAmelCase_ = activation_function
lowerCAmelCase_ = init_std
lowerCAmelCase_ = encoder_layerdrop
lowerCAmelCase_ = decoder_layerdrop
lowerCAmelCase_ = use_cache
lowerCAmelCase_ = encoder_layers
lowerCAmelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase_ = max_source_positions
lowerCAmelCase_ = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
lowerCAmelCase_ = classifier_proj_size
lowerCAmelCase_ = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase_ = apply_spec_augment
lowerCAmelCase_ = mask_time_prob
lowerCAmelCase_ = mask_time_length
lowerCAmelCase_ = mask_time_min_masks
lowerCAmelCase_ = mask_feature_prob
lowerCAmelCase_ = mask_feature_length
lowerCAmelCase_ = mask_feature_min_masks
lowerCAmelCase_ = median_filter_width
super().__init__(
pad_token_id=UpperCamelCase__, bos_token_id=UpperCamelCase__, eos_token_id=UpperCamelCase__, is_encoder_decoder=UpperCamelCase__, decoder_start_token_id=UpperCamelCase__, suppress_tokens=UpperCamelCase__, begin_suppress_tokens=UpperCamelCase__, **UpperCamelCase__, )
class A ( __UpperCAmelCase ):
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = OrderedDict(
[
('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCAmelCase_ = {0: '''batch'''}
else:
lowerCAmelCase_ = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__, direction='''inputs''' )
return common_inputs
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = -1, UpperCamelCase__ = -1, UpperCamelCase__ = False, UpperCamelCase__ = None, UpperCamelCase__ = 2_2050, UpperCamelCase__ = 5.0, UpperCamelCase__ = 220, ):
"""simple docstring"""
lowerCAmelCase_ = OrderedDict()
lowerCAmelCase_ = OnnxConfig.generate_dummy_inputs(
self, preprocessor=preprocessor.feature_extractor, batch_size=UpperCamelCase__, framework=UpperCamelCase__, sampling_rate=UpperCamelCase__, time_duration=UpperCamelCase__, frequency=UpperCamelCase__, )
lowerCAmelCase_ = encoder_inputs['''input_features'''].shape[2]
lowerCAmelCase_ = encoder_sequence_length // 2 if self.use_past else seq_length
lowerCAmelCase_ = super().generate_dummy_inputs(
preprocessor.tokenizer, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
lowerCAmelCase_ = encoder_inputs.pop('''input_features''' )
lowerCAmelCase_ = decoder_inputs.pop('''decoder_input_ids''' )
if "past_key_values" in decoder_inputs:
lowerCAmelCase_ = decoder_inputs.pop('''past_key_values''' )
return dummy_inputs
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return 1E-3
| 431 | 1 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class a ( __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : List[str] = XLMRobertaTokenizer
__lowerCAmelCase : Tuple = XLMRobertaTokenizerFast
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Union[str, Any] = True
def __lowerCamelCase ( self :int ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case__ : Union[str, Any] = XLMRobertaTokenizer(__lowercase ,keep_accents=__lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self :str ):
snake_case__ : Tuple = '''<pad>'''
snake_case__ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) ,__lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) ,__lowercase )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'''<s>''' )
self.assertEqual(vocab_keys[1] ,'''<pad>''' )
self.assertEqual(vocab_keys[-1] ,'''<mask>''' )
self.assertEqual(len(__lowercase ) ,1_0_0_2 )
def __lowerCamelCase ( self :Dict ):
self.assertEqual(self.get_tokenizer().vocab_size ,1_0_0_2 )
def __lowerCamelCase ( self :int ):
snake_case__ : int = XLMRobertaTokenizer(__lowercase ,keep_accents=__lowercase )
snake_case__ : Optional[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__lowercase ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) ,[value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] ,)
snake_case__ : int = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowercase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
snake_case__ : List[str] = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(
__lowercase ,[
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] ,)
snake_case__ : List[Any] = tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(
__lowercase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
def __lowerCamelCase ( self :Dict ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case__ : str = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case__ : Tuple = self.rust_tokenizer_class.from_pretrained(__lowercase ,**__lowercase )
snake_case__ : Union[str, Any] = self.tokenizer_class.from_pretrained(__lowercase ,**__lowercase )
snake_case__ : Optional[Any] = tempfile.mkdtemp()
snake_case__ : Union[str, Any] = tokenizer_r.save_pretrained(__lowercase )
snake_case__ : List[str] = tokenizer_p.save_pretrained(__lowercase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
snake_case__ : List[Any] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(__lowercase ,__lowercase )
# Checks everything loads correctly in the same way
snake_case__ : int = tokenizer_r.from_pretrained(__lowercase )
snake_case__ : int = tokenizer_p.from_pretrained(__lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowercase ,__lowercase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowercase )
# Save tokenizer rust, legacy_format=True
snake_case__ : List[Any] = tempfile.mkdtemp()
snake_case__ : Dict = tokenizer_r.save_pretrained(__lowercase ,legacy_format=__lowercase )
snake_case__ : List[Any] = tokenizer_p.save_pretrained(__lowercase )
# Checks it save with the same files
self.assertSequenceEqual(__lowercase ,__lowercase )
# Checks everything loads correctly in the same way
snake_case__ : List[str] = tokenizer_r.from_pretrained(__lowercase )
snake_case__ : str = tokenizer_p.from_pretrained(__lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowercase ,__lowercase ) )
shutil.rmtree(__lowercase )
# Save tokenizer rust, legacy_format=False
snake_case__ : List[Any] = tempfile.mkdtemp()
snake_case__ : int = tokenizer_r.save_pretrained(__lowercase ,legacy_format=__lowercase )
snake_case__ : Tuple = tokenizer_p.save_pretrained(__lowercase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case__ : Dict = tokenizer_r.from_pretrained(__lowercase )
snake_case__ : Optional[Any] = tokenizer_p.from_pretrained(__lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowercase ,__lowercase ) )
shutil.rmtree(__lowercase )
@cached_property
def __lowerCamelCase ( self :Optional[int] ):
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def __lowerCamelCase ( self :Union[str, Any] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__lowercase ,f.name )
snake_case__ : Union[str, Any] = XLMRobertaTokenizer(f.name ,keep_accents=__lowercase )
snake_case__ : List[Any] = pickle.dumps(__lowercase )
pickle.loads(__lowercase )
def __lowerCamelCase ( self :List[Any] ):
if not self.test_rust_tokenizer:
return
snake_case__ : str = self.get_tokenizer()
snake_case__ : List[str] = self.get_rust_tokenizer()
snake_case__ : str = '''I was born in 92000, and this is falsé.'''
snake_case__ : int = tokenizer.tokenize(__lowercase )
snake_case__ : Union[str, Any] = rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
snake_case__ : Dict = tokenizer.encode(__lowercase ,add_special_tokens=__lowercase )
snake_case__ : Union[str, Any] = rust_tokenizer.encode(__lowercase ,add_special_tokens=__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
snake_case__ : Tuple = self.get_rust_tokenizer()
snake_case__ : Optional[Any] = tokenizer.encode(__lowercase )
snake_case__ : Union[str, Any] = rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
@slow
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : str = '''Hello World!'''
snake_case__ : List[str] = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowercase ,self.big_tokenizer.encode(__lowercase ) )
@slow
def __lowerCamelCase ( self :int ):
snake_case__ : List[Any] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
snake_case__ : Any = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowercase ,self.big_tokenizer.encode(__lowercase ) )
@slow
def __lowerCamelCase ( self :int ):
# fmt: off
snake_case__ : Optional[int] = {'''input_ids''': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase ,model_name='''xlm-roberta-base''' ,revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' ,)
| 721 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class a ( __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : str = ShapEPipeline
__lowerCAmelCase : Union[str, Any] = ["""prompt"""]
__lowerCAmelCase : Union[str, Any] = ["""prompt"""]
__lowerCAmelCase : Tuple = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
__lowerCAmelCase : Optional[Any] = False
@property
def __lowerCamelCase ( self :Dict ):
return 3_2
@property
def __lowerCamelCase ( self :str ):
return 3_2
@property
def __lowerCamelCase ( self :Optional[int] ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self :int ):
return 8
@property
def __lowerCamelCase ( self :int ):
snake_case__ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __lowerCamelCase ( self :int ):
torch.manual_seed(0 )
snake_case__ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,)
return CLIPTextModelWithProjection(__lowercase )
@property
def __lowerCamelCase ( self :str ):
torch.manual_seed(0 )
snake_case__ : Optional[Any] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_6,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 3_2,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
snake_case__ : Optional[int] = PriorTransformer(**__lowercase )
return model
@property
def __lowerCamelCase ( self :Optional[int] ):
torch.manual_seed(0 )
snake_case__ : Dict = {
'''param_shapes''': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 1_2,
'''background''': (
0.1,
0.1,
0.1,
),
}
snake_case__ : List[str] = ShapERenderer(**__lowercase )
return model
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : str = self.dummy_prior
snake_case__ : Optional[Any] = self.dummy_text_encoder
snake_case__ : List[Any] = self.dummy_tokenizer
snake_case__ : Optional[int] = self.dummy_renderer
snake_case__ : str = HeunDiscreteScheduler(
beta_schedule='''exp''' ,num_train_timesteps=1_0_2_4 ,prediction_type='''sample''' ,use_karras_sigmas=__lowercase ,clip_sample=__lowercase ,clip_sample_range=1.0 ,)
snake_case__ : Tuple = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :int ,__lowercase :str=0 ):
if str(__lowercase ).startswith('''mps''' ):
snake_case__ : List[str] = torch.manual_seed(__lowercase )
else:
snake_case__ : Optional[Any] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
snake_case__ : Optional[int] = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 3_2,
'''output_type''': '''np''',
}
return inputs
def __lowerCamelCase ( self :Tuple ):
snake_case__ : str = '''cpu'''
snake_case__ : str = self.get_dummy_components()
snake_case__ : Optional[Any] = self.pipeline_class(**__lowercase )
snake_case__ : Tuple = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
snake_case__ : Tuple = pipe(**self.get_dummy_inputs(__lowercase ) )
snake_case__ : Dict = output.images[0]
snake_case__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
snake_case__ : Tuple = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCamelCase ( self :Any ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Optional[int] = torch_device == '''cpu'''
snake_case__ : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=__lowercase ,relax_max_difference=__lowercase ,)
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : List[Any] = self.get_dummy_components()
snake_case__ : Any = self.pipeline_class(**__lowercase )
snake_case__ : str = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
snake_case__ : Any = 1
snake_case__ : str = 2
snake_case__ : Any = self.get_dummy_inputs(__lowercase )
for key in inputs.keys():
if key in self.batch_params:
snake_case__ : Optional[Any] = batch_size * [inputs[key]]
snake_case__ : Any = pipe(**__lowercase ,num_images_per_prompt=__lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
snake_case__ : Optional[int] = ShapEPipeline.from_pretrained('''openai/shap-e''' )
snake_case__ : Any = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
snake_case__ : Optional[int] = torch.Generator(device=__lowercase ).manual_seed(0 )
snake_case__ : Tuple = pipe(
'''a shark''' ,generator=__lowercase ,guidance_scale=15.0 ,num_inference_steps=6_4 ,frame_size=6_4 ,output_type='''np''' ,).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(__lowercase ,__lowercase )
| 219 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int]=13 , UpperCAmelCase__ : Any=7 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[Any]=99 , UpperCAmelCase__ : Optional[Any]=32 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : Any=37 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Any=512 , UpperCAmelCase__ : str=16 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Tuple=0 , ):
'''simple docstring'''
lowercase : Optional[Any] =parent
lowercase : Dict =batch_size
lowercase : List[Any] =seq_length
lowercase : Optional[Any] =is_training
lowercase : Tuple =use_input_mask
lowercase : Dict =use_token_type_ids
lowercase : Any =use_labels
lowercase : List[Any] =vocab_size
lowercase : int =hidden_size
lowercase : List[Any] =num_hidden_layers
lowercase : Dict =num_attention_heads
lowercase : Optional[Any] =intermediate_size
lowercase : str =hidden_act
lowercase : Optional[Any] =hidden_dropout_prob
lowercase : Any =attention_probs_dropout_prob
lowercase : List[Any] =max_position_embeddings
lowercase : Dict =type_vocab_size
lowercase : List[str] =type_sequence_label_size
lowercase : Union[str, Any] =initializer_range
lowercase : Tuple =num_labels
lowercase : Any =num_choices
lowercase : Dict =scope
lowercase : Any =projection_dim
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Dict =None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowercase : Tuple =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : List[Any] =None
if self.use_token_type_ids:
lowercase : int =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : Union[str, Any] =None
lowercase : Optional[Any] =None
lowercase : Optional[int] =None
if self.use_labels:
lowercase : Optional[int] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
lowercase : int =BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
lowercase : List[Any] =DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : List[str] =TFDPRContextEncoder(config=UpperCAmelCase__ )
lowercase : List[Any] =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowercase : Optional[int] =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowercase : int =model(UpperCAmelCase__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
lowercase : str =TFDPRQuestionEncoder(config=UpperCAmelCase__ )
lowercase : Dict =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowercase : Optional[Any] =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowercase : Any =model(UpperCAmelCase__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : List[Any] =TFDPRReader(config=UpperCAmelCase__ )
lowercase : Tuple =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : List[Any] =self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Dict =config_and_inputs
lowercase : Optional[Any] ={'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowerCamelCase_ = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int =TFDPRModelTester(self )
lowercase : Optional[Any] =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Optional[int] =TFDPRContextEncoder.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Optional[int] =TFDPRContextEncoder.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Tuple =TFDPRQuestionEncoder.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : List[str] =TFDPRReader.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : str =TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
lowercase : Any =tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
lowercase : int =model(UpperCAmelCase__ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowercase : Any =tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 92 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float:
lowercase : Any =0.0_0
lowercase : Tuple =0
for resistor in resistors:
if resistor <= 0:
lowercase : Dict =f'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(__magic_name__ )
first_sum += 1 / float(__magic_name__ )
index += 1
return 1 / first_sum
def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float:
lowercase : Optional[Any] =0.0_0
lowercase : int =0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase : Tuple =f'''Resistor at index {index} has a negative value!'''
raise ValueError(__magic_name__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92 | 1 |
_UpperCAmelCase = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "a",
11: "b",
12: "c",
13: "d",
14: "e",
15: "f",
}
def _lowerCamelCase ( _a ):
"""simple docstring"""
assert type(_a ) in (int, float) and decimal == int(_a )
_lowerCamelCase = int(_a )
_lowerCamelCase = ''''''
_lowerCamelCase = False
if decimal < 0:
_lowerCamelCase = True
decimal *= -1
while decimal > 0:
_lowerCamelCase , _lowerCamelCase = divmod(_a , 1_6 )
_lowerCamelCase = values[remainder] + hexadecimal
_lowerCamelCase = '''0x''' + hexadecimal
if negative:
_lowerCamelCase = '''-''' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 297 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase ( _a , _a , _a ):
"""simple docstring"""
_lowerCamelCase = MobileBertConfig.from_json_file(_a )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowerCamelCase = MobileBertForPreTraining(_a )
# Load weights from tf checkpoint
_lowerCamelCase = load_tf_weights_in_mobilebert(_a , _a , _a )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , _a )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_UpperCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 297 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ ( _a, _a, unittest.TestCase ):
_lowerCAmelCase = IFInpaintingPipeline
_lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
_lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _A ( self : List[str] ):
return self._get_dummy_components()
def _A ( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any]=0 ):
if str(lowerCamelCase__ ).startswith('''mps''' ):
lowerCAmelCase : Union[str, Any] = torch.manual_seed(lowerCamelCase__ )
else:
lowerCAmelCase : str = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
lowerCAmelCase : Optional[int] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
lowerCAmelCase : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
lowerCAmelCase : Dict = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _A ( self : Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _A ( self : List[str] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def _A ( self : str ):
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _A ( self : List[Any] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _A ( self : Dict ):
self._test_save_load_local()
def _A ( self : Optional[int] ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 348 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : Tuple = inspect.getfile(accelerate.test_utils )
lowerCamelCase_ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase_ : Union[str, Any] = ['''accelerate''', '''launch''']
lowerCamelCase_ : Tuple = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase_ : Tuple = '''default_config.yaml'''
lowerCamelCase_ : str = config_folder / config_file
lowerCamelCase_ : List[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase_ : Dict = Path('''tests/test_configs''' )
@classmethod
def lowerCamelCase (cls ) -> Dict:
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowerCamelCase (cls ) -> Any:
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=__magic_name__ ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(__magic_name__ ), self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : List[str] = '''test-tpu'''
lowerCamelCase_ : Dict = '''us-central1-a'''
lowerCamelCase_ : Any = '''ls'''
lowerCamelCase_ : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase_ : Tuple = '''cd /usr/share'''
lowerCamelCase_ : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase_ : List[Any] = '''Running gcloud compute tpus tpu-vm ssh'''
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : int = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=__magic_name__ )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Any = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : str = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Tuple = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
| 60 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "ibert"
def __init__( self , UpperCamelCase__=3_0522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=2 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__="absolute" , UpperCamelCase__=False , UpperCamelCase__="none" , **UpperCamelCase__ , ):
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[Any] = vocab_size
A__ : Optional[int] = hidden_size
A__ : Optional[Any] = num_hidden_layers
A__ : Any = num_attention_heads
A__ : str = hidden_act
A__ : Tuple = intermediate_size
A__ : int = hidden_dropout_prob
A__ : int = attention_probs_dropout_prob
A__ : List[str] = max_position_embeddings
A__ : Union[str, Any] = type_vocab_size
A__ : Union[str, Any] = initializer_range
A__ : Any = layer_norm_eps
A__ : List[Any] = position_embedding_type
A__ : List[str] = quant_mode
A__ : Tuple = force_dequant
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@property
def __snake_case ( self ):
if self.task == "multiple-choice":
A__ : List[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
A__ : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] ) | 704 |
from numpy import exp, pi, sqrt
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 ) -> int:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 55 | 0 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def a (lowerCAmelCase__=32 , lowerCAmelCase__=10 , lowerCAmelCase__=100 , lowerCAmelCase__=1_026 , lowerCAmelCase__=True , lowerCAmelCase__="data/tokenized_stories_train_wikitext103.jbl" , lowerCAmelCase__="igf_context_pairs.jbl" , ):
set_seed(3 )
# generate train_data and objective_set
__a , __a = generate_datasets(
lowerCAmelCase__ , lowerCAmelCase__ , number=lowerCAmelCase__ , min_len=1_026 , trim=lowerCAmelCase__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
__a = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# load pretrained model
__a = load_gpta("""gpt2""" ).to(lowerCAmelCase__ )
print("""computing perplexity on objective set""" )
__a = compute_perplexity(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).item()
print("""perplexity on objective set:""" , lowerCAmelCase__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def a (lowerCAmelCase__ , lowerCAmelCase__=15 , lowerCAmelCase__=128 , lowerCAmelCase__=100 , lowerCAmelCase__="igf_model.pt" , ):
set_seed(42 )
# Load pre-trained model
__a = GPTaLMHeadModel.from_pretrained("""gpt2""" )
# Initialize secondary learner to use embedding weights of model
__a = SecondaryLearner(lowerCAmelCase__ )
# Train secondary learner
__a = train_secondary_learner(
lowerCAmelCase__ , lowerCAmelCase__ , max_epochs=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , eval_freq=100 , igf_model_path=lowerCAmelCase__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=32 , lowerCAmelCase__=1_000 , lowerCAmelCase__=16 , lowerCAmelCase__=1.0 , lowerCAmelCase__=recopy_gpta , lowerCAmelCase__=None , lowerCAmelCase__=10 , lowerCAmelCase__="gpt2_finetuned.pt" , ):
__a = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
__a = RandomSampler(lowerCAmelCase__ )
__a = DataLoader(lowerCAmelCase__ , sampler=lowerCAmelCase__ )
__a = max_steps // (len(lowerCAmelCase__ )) + 1
__a = 0
__a = torch.zeros((1, context_len) , dtype=torch.long , device=lowerCAmelCase__ )
__a , __a , __a = recopy_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(lowerCAmelCase__ )
secondary_learner.eval()
__a = []
__a = 0
__a = []
__a = []
# Compute the performance of the transformer model at the beginning
__a = compute_perplexity(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
test_perps.append(lowerCAmelCase__ )
print("""Test perplexity, step""" , lowerCAmelCase__ , """:""" , lowerCAmelCase__ )
for epoch in range(int(lowerCAmelCase__ ) ):
for step, example in enumerate(lowerCAmelCase__ ):
torch.cuda.empty_cache()
__a = random.randint(0 , example.size(2 ) - context_len - 1 )
__a = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
__a = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
__a = True
if secondary_learner is not None:
__a = secondary_learner.forward(
torch.tensor(lowerCAmelCase__ , dtype=torch.long , device=lowerCAmelCase__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(lowerCAmelCase__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
__a = -1
if predicted_q < threshold:
__a = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
__a = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
__a = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
__a = compute_perplexity(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
test_perps.append(lowerCAmelCase__ )
print("""Test perplexity, step""" , lowerCAmelCase__ , """:""" , lowerCAmelCase__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , lowerCAmelCase__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def a ():
__a = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""The input data dir. Should contain data files for WikiText.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--data_file""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
) , )
parser.add_argument(
"""--igf_data_file""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
parser.add_argument(
"""--output_dir""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""The output directory where the final fine-tuned model is stored.""" , )
parser.add_argument(
"""--tokenizer_name""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument("""--seed""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""" , default=32 , type=lowerCAmelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--size_objective_set""" , default=100 , type=lowerCAmelCase__ , help="""number of articles that are long enough to be used as our objective set""" , )
parser.add_argument(
"""--eval_freq""" , default=100 , type=lowerCAmelCase__ , help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""" , default=1_000 , type=lowerCAmelCase__ , help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""" , default=128 , type=lowerCAmelCase__ , help="""batch size of training data for secondary learner""" , )
parser.add_argument(
"""--batch_size""" , default=16 , type=lowerCAmelCase__ , help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""" , default=10 , type=lowerCAmelCase__ , help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
) , )
parser.add_argument(
"""--number""" , default=100 , type=lowerCAmelCase__ , help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""" , default=1_026 , type=lowerCAmelCase__ , help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""" , default=15 , type=lowerCAmelCase__ , help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=lowerCAmelCase__ , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=lowerCAmelCase__ , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=lowerCAmelCase__ , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
# Load train data for secondary learner
__a = joblib.load("""data/IGF_values.jbl""" )
# Train secondary learner
__a = training_secondary_learner(
lowerCAmelCase__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , )
# load pretrained gpt2 model
__a = GPTaLMHeadModel.from_pretrained("""gpt2""" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
__a , __a = generate_datasets(
context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=lowerCAmelCase__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=lowerCAmelCase__ , secondary_learner=lowerCAmelCase__ , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
main()
| 99 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 328 | 0 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowercase_ : Any = importlib.util.find_spec('''s3fs''') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowercase_ : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
if "://" in dataset_path:
lowercase = dataset_path.split("""://""" )[1]
return dataset_path
def SCREAMING_SNAKE_CASE ( lowercase_ : fsspec.AbstractFileSystem ):
if fs is not None and fs.protocol != "file":
return True
else:
return False
def SCREAMING_SNAKE_CASE ( lowercase_ : fsspec.AbstractFileSystem , lowercase_ : str , lowercase_ : str ):
lowercase = not is_remote_filesystem(lowercase_ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowercase_ ) , fs._strip_protocol(lowercase_ ) )
else:
fs.mv(lowercase_ , lowercase_ , recursive=lowercase_ )
def SCREAMING_SNAKE_CASE ( ):
if hasattr(fsspec.asyn , """reset_lock""" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
lowercase = None
lowercase = None
lowercase = threading.Lock()
| 715 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=224 , _lowerCAmelCase=1000 , _lowerCAmelCase=[3, 3, 6, 4] , _lowerCAmelCase=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = is_training
lowercase = use_labels
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = num_labels
lowercase = image_size
lowercase = layer_depths
lowercase = embed_dims
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self ) -> int:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1E-5 , )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = SwiftFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.num_labels
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
((lowercase) , (lowercase) , (lowercase)) = self.prepare_config_and_inputs()
lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__A = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
__A = False
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = SwiftFormerModelTester(self )
lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _a ( self ) -> List[str]:
'''simple docstring'''
pass
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _a ( self ) -> int:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self ) -> Any:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = SwiftFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.hidden_states
lowercase = 8
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
def _config_zero_init(_lowerCAmelCase ):
lowercase = copy.deepcopy(_lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_lowerCAmelCase , _lowerCAmelCase , 1E-10 )
if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ):
lowercase = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return configs_no_init
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
lowercase = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> Any:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( ):
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> List[str]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# verify the logits
lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
lowercase = torch.tensor([[-2.17_03E00, 2.11_07E00, -2.08_11E00]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 653 | 0 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class __lowercase ( __lowercase , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def __a ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase = PegasusTokenizer(_lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __a ( self : List[Any] ) -> int:
'''simple docstring'''
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def __a ( self : Dict , **__lowerCamelCase : int ) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self : Tuple , __lowerCamelCase : List[str] ) -> Dict:
'''simple docstring'''
return ("This is a test", "This is a test")
def __a ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
lowercase = '''</s>'''
lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) , _lowerCAmelCase )
def __a ( self : int ) -> List[str]:
'''simple docstring'''
lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(_lowerCAmelCase ) , 11_03 )
def __a ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 11_03 )
def __a ( self : Any ) -> List[Any]:
'''simple docstring'''
lowercase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowercase = rust_tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids[0]
lowercase = py_tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def __a ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowercase = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowercase = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
lowercase = tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase ).input_ids[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def __a ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowercase = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
lowercase = '''To ensure a smooth flow of bank resolutions.'''
lowercase = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
lowercase = tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase ).input_ids[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __a ( self : str ) -> Any:
'''simple docstring'''
lowercase = ['''This is going to be way too long.''' * 1_50, '''short example''']
lowercase = ['''not super long but more than 5 tokens''', '''tiny''']
lowercase = self._large_tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='''pt''' )
lowercase = self._large_tokenizer(
text_target=_lowerCAmelCase , max_length=5 , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(_lowerCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def __a ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class __lowercase ( __lowercase , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def __a ( self : List[str] ) -> Dict:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase = PegasusTokenizer(_lowerCAmelCase , offset=0 , mask_token_sent=_lowerCAmelCase , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __a ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def __a ( self : List[str] , **__lowerCamelCase : Tuple ) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self : Optional[int] , __lowerCamelCase : Any ) -> List[str]:
'''simple docstring'''
return ("This is a test", "This is a test")
def __a ( self : List[Any] ) -> Dict:
'''simple docstring'''
lowercase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowercase = rust_tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids[0]
lowercase = py_tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
@require_torch
def __a ( self : Dict ) -> Tuple:
'''simple docstring'''
lowercase = ['''This is going to be way too long.''' * 10_00, '''short example''']
lowercase = ['''not super long but more than 5 tokens''', '''tiny''']
lowercase = self._large_tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='''pt''' )
lowercase = self._large_tokenizer(
text_target=_lowerCAmelCase , max_length=5 , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 40_96)
assert batch.attention_mask.shape == (2, 40_96)
assert targets["input_ids"].shape == (2, 5)
assert len(_lowerCAmelCase ) == 2 # input_ids, attention_mask.
def __a ( self : List[Any] ) -> int:
'''simple docstring'''
lowercase = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowercase = self._large_tokenizer(_lowerCAmelCase ).input_ids
self.assertListEqual(
_lowerCAmelCase , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
| 604 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def a__ ( lowercase__ , lowercase__ , lowercase__=1_0_2_4 , lowercase__=1_0_2_4 , lowercase__=False , **lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =AutoTokenizer.from_pretrained(lowercase__ )
UpperCAmelCase_ =SeqaSeqDataset(lowercase__ , lowercase__ , lowercase__ , lowercase__ , type_path="train" , **lowercase__ )
UpperCAmelCase_ =tok.pad_token_id
def get_lens(lowercase__ ):
UpperCAmelCase_ =tqdm(
DataLoader(lowercase__ , batch_size=5_1_2 , num_workers=8 , shuffle=lowercase__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
UpperCAmelCase_ =[]
for batch in dl:
UpperCAmelCase_ =batch["input_ids"].ne(lowercase__ ).sum(1 ).tolist()
UpperCAmelCase_ =batch["labels"].ne(lowercase__ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowercase__ , lowercase__ ):
max_lens.append(max(lowercase__ , lowercase__ ) )
else:
max_lens.extend(lowercase__ )
return max_lens
UpperCAmelCase_ =get_lens(lowercase__ )
UpperCAmelCase_ =SeqaSeqDataset(lowercase__ , lowercase__ , lowercase__ , lowercase__ , type_path="val" , **lowercase__ )
UpperCAmelCase_ =get_lens(lowercase__ )
pickle_save(lowercase__ , train_ds.len_file )
pickle_save(lowercase__ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 54 | 0 |
"""simple docstring"""
import re
import subprocess
import sys
a_ : Optional[int] = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
a_ : Optional[int] = (
subprocess.check_output(F"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
)
a_ : List[Any] = '''|'''.join(sys.argv[1:])
a_ : List[Any] = re.compile(RF"""^({joined_dirs}).*?\.py$""")
a_ : Any = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
| 711 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
a_ : Optional[int] = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_28,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.0_1),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class __lowercase( unittest.TestCase ):
'''simple docstring'''
@classmethod
def snake_case_ ( cls ):
__lowerCamelCase : Tuple = TOKEN
HfFolder.save_token(__a )
@classmethod
def snake_case_ ( cls ):
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def snake_case_ ( self ):
__lowerCamelCase : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
__lowerCamelCase : Any = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__a , repo_id='test-config' , push_to_hub=__a , use_auth_token=self._token )
__lowerCamelCase : Any = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
def snake_case_ ( self ):
__lowerCamelCase : int = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
__lowerCamelCase : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__a , repo_id='valid_org/test-config-org' , push_to_hub=__a , use_auth_token=self._token )
__lowerCamelCase : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
def snake_case_ ( self ):
CustomConfig.register_for_auto_class()
__lowerCamelCase : Optional[Any] = CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
__lowerCamelCase : Tuple = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=__a )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class __lowercase( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ):
__lowerCamelCase : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__lowerCamelCase : List[str] = c.n_embd + 1 # int
__lowerCamelCase : Dict = c.resid_pdrop + 1.0 # float
__lowerCamelCase : int = not c.scale_attn_weights # bool
__lowerCamelCase : Optional[int] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__a , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(__a , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(__a , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(__a , c.summary_type , 'mismatch for key: summary_type' )
def snake_case_ ( self ):
__lowerCamelCase : Tuple = PretrainedConfig()
__lowerCamelCase : Dict = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
__a , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
__lowerCamelCase : int = [key for key, value in config_common_kwargs.items() if value == getattr(__a , __a )]
if len(__a ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {", ".join(__a )}.''' )
def snake_case_ ( self ):
with self.assertRaises(__a ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowerCamelCase : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
__lowerCamelCase : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(__a )
def snake_case_ ( self ):
# A mock response for an HTTP head request to emulate server down
__lowerCamelCase : List[str] = mock.Mock()
__lowerCamelCase : Tuple = 500
__lowerCamelCase : Tuple = {}
__lowerCamelCase : Optional[Any] = HTTPError
__lowerCamelCase : str = {}
# Download this model to make sure it's in the cache.
__lowerCamelCase : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__a ) as mock_head:
__lowerCamelCase : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case_ ( self ):
# This test is for deprecated behavior and can be removed in v5
__lowerCamelCase : Optional[Any] = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def snake_case_ ( self ):
__lowerCamelCase : List[Any] = AutoConfig.from_pretrained('bert-base-cased' )
__lowerCamelCase : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__a )
__lowerCamelCase : Optional[int] = 2
json.dump(configuration.to_dict() , open(os.path.join(__a , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__lowerCamelCase : Any = AutoConfig.from_pretrained(__a )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__lowerCamelCase : Any = ['config.42.0.0.json']
__lowerCamelCase : Tuple = 768
configuration.save_pretrained(__a )
shutil.move(os.path.join(__a , 'config.4.0.0.json' ) , os.path.join(__a , 'config.42.0.0.json' ) )
__lowerCamelCase : Dict = AutoConfig.from_pretrained(__a )
self.assertEqual(new_configuration.hidden_size , 768 )
def snake_case_ ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__lowerCamelCase : List[str] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
__lowerCamelCase : Tuple = 'v4.0.0'
__lowerCamelCase , __lowerCamelCase : Optional[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
__a , return_unused_kwargs=__a )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(__a , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__lowerCamelCase : Union[str, Any] = 'v3.0.0'
__lowerCamelCase : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(__a )
self.assertEqual(old_configuration.hidden_size , 768 )
| 263 | 0 |
from __future__ import annotations
import math
def _snake_case ( __snake_case ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
_lowerCAmelCase = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def _snake_case ( __snake_case ):
if not isinstance(__snake_case , __snake_case ):
raise ValueError('''n must be an integer''' )
if n <= 0:
raise ValueError('''n must be >= 0''' )
_UpperCamelCase = []
for num in range(len(__snake_case ) ):
_UpperCamelCase = 0
while 2 * i * i <= odd_composites[num]:
_UpperCamelCase = odd_composites[num] - 2 * i * i
if is_prime(__snake_case ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__snake_case ) == n:
return list_nums
return []
def _snake_case ( ):
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f'{solution() = }')
| 10 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowerCamelCase ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , _SCREAMING_SNAKE_CASE = 768 , )->Union[str, Any]:
'''simple docstring'''
super().__init__()
A_ : Union[str, Any] = nn.Parameter(torch.zeros(1 , _SCREAMING_SNAKE_CASE ) )
A_ : Any = nn.Parameter(torch.ones(1 , _SCREAMING_SNAKE_CASE ) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , )->Tuple:
'''simple docstring'''
A_ : Optional[Any] = nn.Parameter(self.mean.to(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) )
A_ : str = nn.Parameter(self.std.to(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) )
return self
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Optional[int]:
'''simple docstring'''
A_ : Tuple = (embeds - self.mean) * 1.0 / self.std
return embeds
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->List[str]:
'''simple docstring'''
A_ : List[str] = (embeds * self.std) + self.mean
return embeds
| 590 | 0 |
SCREAMING_SNAKE_CASE = [
(1_0_0_0, 'M'),
(9_0_0, 'CM'),
(5_0_0, 'D'),
(4_0_0, 'CD'),
(1_0_0, 'C'),
(9_0, 'XC'),
(5_0, 'L'),
(4_0, 'XL'),
(1_0, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def a (lowerCAmelCase__ ):
__a = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000}
__a = 0
__a = 0
while place < len(snake_case_ ):
if (place + 1 < len(snake_case_ )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def a (lowerCAmelCase__ ):
__a = []
for arabic, roman in ROMAN:
(__a) = divmod(snake_case_ , snake_case_ )
result.append(roman * factor )
if number == 0:
break
return "".join(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 |
from math import ceil, sqrt
def a (lowerCAmelCase__ = 1_000_000 ):
__a = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
__a = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
__a = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f'''{solution() = }''')
| 209 | 0 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class __A ( A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = GPTSwaTokenizer
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : List[Any] = True
__lowerCamelCase : Union[str, Any] = False
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_a = GPTSwaTokenizer(A , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def a__ (self , A ) -> str:
"""simple docstring"""
_a = '''This is a test'''
_a = '''This is a test'''
return input_text, output_text
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = '''<s>'''
_a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def a__ (self ) -> Dict:
"""simple docstring"""
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(A ) , 2_000 )
def a__ (self ) -> List[str]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 2_000 )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = GPTSwaTokenizer(A )
_a = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [465, 287, 265, 631, 842] )
_a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
# fmt: off
self.assertListEqual(
A , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
_a = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
_a = tokenizer.convert_ids_to_tokens(A )
# fmt: off
self.assertListEqual(
A , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] )
# fmt: on
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = GPTSwaTokenizer(A )
_a = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
_a = [
[465, 287, 265, 631, 842],
[262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(A , A ):
self.assertListEqual(tokenizer.encode_fast(A ) , A )
# Test that decode_fast returns the input text
for text, token_ids in zip(A , A ):
self.assertEqual(tokenizer.decode_fast(A ) , A )
@slow
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
_a = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=A , )
| 11 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase (__A = "laptop"):
"""simple docstring"""
_a = F'''https://www.amazon.in/laptop/s?k={product}'''
_a = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
_a = BeautifulSoup(requests.get(__A , headers=__A).text)
# Initialize a Pandas dataframe with the column titles
_a = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
])
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''}) , ):
try:
_a = item.ha.text
_a = '''https://www.amazon.in/''' + item.ha.a['''href''']
_a = item.find('''span''' , attrs={'''class''': '''a-offscreen'''}).text
try:
_a = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''}).text
except AttributeError:
_a = '''Not available'''
try:
_a = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''}).text.split('''₹''')[1]
)
except AttributeError:
_a = ''''''
try:
_a = float(
(
(
float(product_mrp.strip('''₹''').replace(''',''' , ''''''))
- float(product_price.strip('''₹''').replace(''',''' , ''''''))
)
/ float(product_mrp.strip('''₹''').replace(''',''' , ''''''))
)
* 100)
except ValueError:
_a = float('''nan''')
except AttributeError:
pass
_a = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_a = ''' '''
_a = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
lowercase_ = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 11 | 1 |
def _UpperCamelCase ( lowerCAmelCase_ ) ->list[list]:
UpperCAmelCase = current_set.copy()
for row_index, row in enumerate(lowerCAmelCase_ ):
UpperCAmelCase = row[0]
for column_index, column in enumerate(lowerCAmelCase_ ):
if magnitude == 0:
UpperCAmelCase = column
continue
UpperCAmelCase = column / magnitude
# Subtract to cancel term
UpperCAmelCase = current_set[0]
UpperCAmelCase = [first_row]
UpperCAmelCase = current_set[1::]
for row in current_set:
UpperCAmelCase = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(lowerCAmelCase_ )
continue
for column_index in range(len(lowerCAmelCase_ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCAmelCase_ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
UpperCAmelCase = final_set[0]
UpperCAmelCase = []
UpperCAmelCase = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
UpperCAmelCase = simplify(lowerCAmelCase_ )
for i in range(len(lowerCAmelCase_ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , lowerCAmelCase_ )
UpperCAmelCase = resultant
return final_set
def _UpperCamelCase ( lowerCAmelCase_ ) ->list:
if len(lowerCAmelCase_ ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
UpperCAmelCase = len(lowerCAmelCase_ ) + 1
if any(len(lowerCAmelCase_ ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(lowerCAmelCase_ , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(lowerCAmelCase_ ) == 1:
return [equations[0][-1] / equations[0][0]]
UpperCAmelCase = equations.copy()
if any(0 in row for row in data_set ):
UpperCAmelCase = data_set.copy()
UpperCAmelCase = []
for row_index, row in enumerate(lowerCAmelCase_ ):
if 0 not in row:
UpperCAmelCase = data_set.pop(lowerCAmelCase_ )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , lowerCAmelCase_ )
UpperCAmelCase = data_set.copy()
UpperCAmelCase = simplify(lowerCAmelCase_ )
UpperCAmelCase = simplified[::-1]
UpperCAmelCase = []
for row in simplified:
UpperCAmelCase = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
UpperCAmelCase = row.copy()[: len(lowerCAmelCase_ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCAmelCase_ ) == 0:
solutions.append(0 )
continue
UpperCAmelCase = temp_row[1::]
UpperCAmelCase = temp_row[::-1]
for column_index, column in enumerate(lowerCAmelCase_ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCAmelCase_ )
UpperCAmelCase = []
for item in solutions:
final.append(float(round(lowerCAmelCase_ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__a = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 710 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __lowercase :
def __init__( self : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any]=1_3 , __lowerCamelCase : Optional[Any]=3_0 , __lowerCamelCase : Any=2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : str=True , __lowerCamelCase : str=True , __lowerCamelCase : Union[str, Any]=3_2 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : Union[str, Any]=3_7 , __lowerCamelCase : str="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=1_0 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Any=2 , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = scope
UpperCAmelCase = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCAmelCase = (image_size // patch_size) ** 2
UpperCAmelCase = num_patches + 2
def _lowercase ( self : Tuple ) -> int:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Tuple ) -> str:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowercase ( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : int ) -> Any:
"""simple docstring"""
UpperCAmelCase = TFDeiTModel(config=__lowerCamelCase )
UpperCAmelCase = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = TFDeiTForMaskedImageModeling(config=__lowerCamelCase )
UpperCAmelCase = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = TFDeiTForMaskedImageModeling(__lowerCamelCase )
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowercase ( self : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int ) -> Any:
"""simple docstring"""
UpperCAmelCase = self.type_sequence_label_size
UpperCAmelCase = TFDeiTForImageClassification(__lowerCamelCase )
UpperCAmelCase = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = TFDeiTForImageClassification(__lowerCamelCase )
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __lowercase ( __snake_case , __snake_case , unittest.TestCase ):
UpperCamelCase = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def _lowercase ( self : str ) -> str:
"""simple docstring"""
UpperCAmelCase = TFDeiTModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=3_7 )
def _lowercase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _lowercase ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
def _lowercase ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , tf.keras.layers.Dense ) )
def _lowercase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__lowerCamelCase )
UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowercase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowercase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def _lowercase ( self : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any=False ) -> int:
"""simple docstring"""
UpperCAmelCase = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _lowercase ( self : Optional[int] ) -> str:
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = TFDeiTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) ->Tuple:
UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def _lowercase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _lowercase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=__lowerCamelCase , return_tensors="""tf""" )
# forward pass
UpperCAmelCase = model(**__lowerCamelCase )
# verify the logits
UpperCAmelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
UpperCAmelCase = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
| 627 | 0 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
lowercase_ : str = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
lowercase_ : Optional[int] = typing.Union[np.floataa, int, float] # noqa: UP007
def A__ ( snake_case_ : Vector , snake_case_ : Vector ):
return np.sqrt(np.sum((np.asarray(snake_case_ ) - np.asarray(snake_case_ )) ** 2 ) )
def A__ ( snake_case_ : Vector , snake_case_ : Vector ):
return sum((va - va) ** 2 for va, va in zip(snake_case_ , snake_case_ ) ) ** (1 / 2)
if __name__ == "__main__":
def A__ ( ):
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
benchmark()
| 64 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''fnet'''
def __init__( self : Any , lowerCamelCase_ : List[str]=3_20_00 , lowerCamelCase_ : List[Any]=7_68 , lowerCamelCase_ : Union[str, Any]=12 , lowerCamelCase_ : Optional[Any]=30_72 , lowerCamelCase_ : Optional[Any]="gelu_new" , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Union[str, Any]=5_12 , lowerCamelCase_ : Optional[int]=4 , lowerCamelCase_ : Optional[int]=0.02 , lowerCamelCase_ : str=1e-12 , lowerCamelCase_ : Any=False , lowerCamelCase_ : str=5_12 , lowerCamelCase_ : str=3 , lowerCamelCase_ : int=1 , lowerCamelCase_ : Optional[Any]=2 , **lowerCamelCase_ : Optional[int] , ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : int = use_tpu_fourier_optimizations
SCREAMING_SNAKE_CASE : List[Any] = tpu_short_seq_length
| 379 | 0 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def UpperCAmelCase__ ():
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
_snake_case : Dict = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching , """os.path.join""" , snake_case__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def UpperCAmelCase__ ():
"""simple docstring"""
assert _test_patching.open is open
_snake_case : Any = """__test_patch_submodule_builtin_mock__"""
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , """open""" , snake_case__ ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = """__test_patch_submodule_missing_mock__"""
with patch_submodule(_test_patching , """pandas.read_csv""" , snake_case__ ):
pass
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Any = """__test_patch_submodule_missing_builtin_mock__"""
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , """len""" , snake_case__ ) is None
with patch_submodule(_test_patching , """len""" , snake_case__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Optional[int] = """__test_patch_submodule_start_and_stop_mock__"""
_snake_case : Dict = patch_submodule(_test_patching , """open""" , snake_case__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def UpperCAmelCase__ ():
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
_snake_case : Any = """__test_patch_submodule_successive_join__"""
_snake_case : Union[str, Any] = """__test_patch_submodule_successive_dirname__"""
_snake_case : Tuple = """__test_patch_submodule_successive_rename__"""
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , """os.path.join""" , snake_case__ ):
with patch_submodule(_test_patching , """os.rename""" , snake_case__ ):
with patch_submodule(_test_patching , """os.path.dirname""" , snake_case__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , """os.rename""" , snake_case__ ):
with patch_submodule(_test_patching , """os.path.join""" , snake_case__ ):
with patch_submodule(_test_patching , """os.path.dirname""" , snake_case__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = """__test_patch_submodule_doesnt_exist_mock__"""
with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , snake_case__ ):
pass
with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , snake_case__ ):
pass
| 715 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[Any] = features.copy() if features else default_expected_features
_snake_case : List[Any] = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
if issubclass(snake_case__ , snake_case__ ):
_snake_case : Optional[Any] = parquet_path
elif issubclass(snake_case__ , snake_case__ ):
_snake_case : int = [parquet_path]
_snake_case : Union[str, Any] = tmp_path / """cache"""
_snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
_snake_case : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = tmp_path / """cache"""
_snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Optional[int] = tmp_path / """cache"""
_snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Optional[Any] = features.copy() if features else default_expected_features
_snake_case : Dict = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
if split:
_snake_case : int = {split: parquet_path}
else:
_snake_case : Optional[Any] = """train"""
_snake_case : int = {"""train""": parquet_path, """test""": parquet_path}
_snake_case : Dict = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" )
_snake_case : int = pf.read()
assert dataset.data.table == output_table
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" )
_snake_case : Tuple = {"""image""": [image_path]}
_snake_case : Optional[int] = Features({"""image""": Image()} )
_snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ )
_snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ):
"""simple docstring"""
assert get_writer_batch_size(snake_case__ ) == expected
| 28 | 0 |
def _a ( __UpperCamelCase : int ):
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 233 |
"""simple docstring"""
def __A ( a_ :Tuple , a_ :Union[str, Any] , a_ :int=False) -> List[str]:
if isinstance(a_ , a_) and isinstance(a_ , a_):
__a : List[str] = len(set_a.intersection(a_))
if alternative_union:
__a : List[str] = len(a_) + len(a_)
else:
__a : int = len(set_a.union(a_))
return intersection / union
if isinstance(a_ , (list, tuple)) and isinstance(a_ , (list, tuple)):
__a : Union[str, Any] = [element for element in set_a if element in set_b]
if alternative_union:
__a : Union[str, Any] = len(a_) + len(a_)
return len(a_) / union
else:
__a : List[Any] = set_a + [element for element in set_b if element not in set_a]
return len(a_) / len(a_)
return len(a_) / len(a_)
return None
if __name__ == "__main__":
A = {'''a''', '''b''', '''c''', '''d''', '''e'''}
A = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b)) | 52 | 0 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class snake_case :
'''simple docstring'''
snake_case_ : Optional[Union[str, Path]] = None
snake_case_ : bool = False
snake_case_ : bool = False
snake_case_ : bool = False
snake_case_ : Optional[Dict] = None
snake_case_ : Optional[str] = None
snake_case_ : bool = False
snake_case_ : bool = False
snake_case_ : bool = False
snake_case_ : bool = True
snake_case_ : Optional[int] = None
snake_case_ : int = 1
snake_case_ : Optional[Union[str, bool]] = None
snake_case_ : bool = False
snake_case_ : Optional[Dict] = None
snake_case_ : Optional[str] = None
def UpperCamelCase_ ( self : Dict) -> "DownloadConfig":
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(lowerCAmelCase) for k, v in self.__dict__.items()})
| 198 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class snake_case ( SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
snake_case_ : Optional[Any] = BertJapaneseTokenizer
snake_case_ : Optional[int] = False
snake_case_ : Optional[int] = True
def UpperCamelCase_ ( self : Optional[int]) -> int:
"""simple docstring"""
super().setUp()
_snake_case : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
_snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : str) -> int:
"""simple docstring"""
_snake_case : List[Any] = """こんにちは、世界。 \nこんばんは、世界。"""
_snake_case : Optional[Any] = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
return input_text, output_text
def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : Any) -> Optional[Any]:
"""simple docstring"""
_snake_case , _snake_case : Dict = self.get_input_output_texts(lowerCAmelCase)
_snake_case : Optional[Any] = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
_snake_case : Tuple = tokenizer.decode(lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase)
return text, ids
def UpperCamelCase_ ( self : List[Any]) -> int:
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase_ ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase_ ( self : List[Any]) -> Dict:
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase_ ( self : Dict) -> Any:
"""simple docstring"""
_snake_case : List[str] = self.tokenizer_class(self.vocab_file)
_snake_case : List[Any] = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""")
self.assertListEqual(lowerCAmelCase , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
def UpperCamelCase_ ( self : int) -> Dict:
"""simple docstring"""
_snake_case : Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""")
self.assertIsNotNone(lowerCAmelCase)
_snake_case : List[str] = """こんにちは、世界。\nこんばんは、世界。"""
_snake_case : Dict = tokenizer.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
_snake_case : Tuple = os.path.join(self.tmpdirname , """tokenizer.bin""")
with open(lowerCAmelCase , """wb""") as handle:
pickle.dump(lowerCAmelCase , lowerCAmelCase)
with open(lowerCAmelCase , """rb""") as handle:
_snake_case : Dict = pickle.load(lowerCAmelCase)
_snake_case : Optional[int] = tokenizer_new.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
def UpperCamelCase_ ( self : str) -> int:
"""simple docstring"""
_snake_case : Optional[Any] = MecabTokenizer(mecab_dic="""ipadic""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def UpperCamelCase_ ( self : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
try:
_snake_case : Optional[int] = MecabTokenizer(mecab_dic="""unidic_lite""")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def UpperCamelCase_ ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
try:
_snake_case : List[Any] = MecabTokenizer(mecab_dic="""unidic""")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def UpperCamelCase_ ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
_snake_case : List[str] = MecabTokenizer(do_lower_case=lowerCAmelCase , mecab_dic="""ipadic""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def UpperCamelCase_ ( self : Optional[int]) -> int:
"""simple docstring"""
try:
_snake_case : Dict = MecabTokenizer(
do_lower_case=lowerCAmelCase , normalize_text=lowerCAmelCase , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""")
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
def UpperCamelCase_ ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
_snake_case : str = MecabTokenizer(normalize_text=lowerCAmelCase , mecab_dic="""ipadic""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
def UpperCamelCase_ ( self : Union[str, Any]) -> str:
"""simple docstring"""
_snake_case : Optional[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""")
self.assertIsNotNone(lowerCAmelCase)
_snake_case : List[str] = """こんにちは、世界。\nこんばんは、世界。"""
_snake_case : str = tokenizer.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
_snake_case : Optional[Any] = os.path.join(self.tmpdirname , """tokenizer.bin""")
with open(lowerCAmelCase , """wb""") as handle:
pickle.dump(lowerCAmelCase , lowerCAmelCase)
with open(lowerCAmelCase , """rb""") as handle:
_snake_case : Optional[Any] = pickle.load(lowerCAmelCase)
_snake_case : Tuple = tokenizer_new.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
@require_sudachi
def UpperCamelCase_ ( self : List[Any]) -> List[str]:
"""simple docstring"""
_snake_case : Union[str, Any] = SudachiTokenizer(sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def UpperCamelCase_ ( self : str) -> Tuple:
"""simple docstring"""
_snake_case : Optional[Any] = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国""", """人""", """参政""", """権"""])
@require_sudachi
def UpperCamelCase_ ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
_snake_case : Dict = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国人""", """参政権"""])
@require_sudachi
def UpperCamelCase_ ( self : Dict) -> str:
"""simple docstring"""
_snake_case : Union[str, Any] = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国人参政権"""])
@require_sudachi
def UpperCamelCase_ ( self : Tuple) -> Tuple:
"""simple docstring"""
_snake_case : List[str] = SudachiTokenizer(do_lower_case=lowerCAmelCase , sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def UpperCamelCase_ ( self : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Dict = SudachiTokenizer(normalize_text=lowerCAmelCase , sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
def UpperCamelCase_ ( self : Any) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Tuple = SudachiTokenizer(trim_whitespace=lowerCAmelCase , sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
def UpperCamelCase_ ( self : Any) -> Any:
"""simple docstring"""
_snake_case : int = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""")
self.assertIsNotNone(lowerCAmelCase)
_snake_case : Optional[Any] = """こんにちは、世界。\nこんばんは、世界。"""
_snake_case : Tuple = tokenizer.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
_snake_case : str = os.path.join(self.tmpdirname , """tokenizer.bin""")
with open(lowerCAmelCase , """wb""") as handle:
pickle.dump(lowerCAmelCase , lowerCAmelCase)
with open(lowerCAmelCase , """rb""") as handle:
_snake_case : int = pickle.load(lowerCAmelCase)
_snake_case : List[Any] = tokenizer_new.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
@require_jumanpp
def UpperCamelCase_ ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_snake_case : Optional[Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def UpperCamelCase_ ( self : Dict) -> Optional[int]:
"""simple docstring"""
_snake_case : Union[str, Any] = JumanppTokenizer(do_lower_case=lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def UpperCamelCase_ ( self : int) -> List[str]:
"""simple docstring"""
_snake_case : Union[str, Any] = JumanppTokenizer(normalize_text=lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def UpperCamelCase_ ( self : Optional[int]) -> List[str]:
"""simple docstring"""
_snake_case : str = JumanppTokenizer(trim_whitespace=lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
def UpperCamelCase_ ( self : Optional[int]) -> int:
"""simple docstring"""
_snake_case : Union[str, Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""") , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
def UpperCamelCase_ ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
_snake_case : Union[str, Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
_snake_case : str = {}
for i, token in enumerate(lowerCAmelCase):
_snake_case : List[Any] = i
_snake_case : List[Any] = WordpieceTokenizer(vocab=lowerCAmelCase , unk_token="""[UNK]""")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""こんにちは""") , ["""こんにちは"""])
self.assertListEqual(tokenizer.tokenize("""こんばんは""") , ["""こん""", """##ばんは"""])
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""") , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""])
def UpperCamelCase_ ( self : Optional[Any]) -> str:
"""simple docstring"""
_snake_case : Optional[int] = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""")
_snake_case : Tuple = tokenizer.subword_tokenizer
_snake_case : Tuple = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""")
self.assertListEqual(lowerCAmelCase , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""])
_snake_case : Union[str, Any] = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""")
self.assertListEqual(lowerCAmelCase , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""])
def UpperCamelCase_ ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
_snake_case : int = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""")
_snake_case : str = tokenizer.encode("""ありがとう。""" , add_special_tokens=lowerCAmelCase)
_snake_case : Optional[Any] = tokenizer.encode("""どういたしまして。""" , add_special_tokens=lowerCAmelCase)
_snake_case : str = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase)
_snake_case : Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class snake_case ( SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
snake_case_ : Tuple = BertJapaneseTokenizer
snake_case_ : Dict = False
def UpperCamelCase_ ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
super().setUp()
_snake_case : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
_snake_case : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
def UpperCamelCase_ ( self : str , **lowerCAmelCase : Union[str, Any]) -> Any:
"""simple docstring"""
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **lowerCAmelCase)
def UpperCamelCase_ ( self : str , lowerCAmelCase : Tuple) -> Optional[int]:
"""simple docstring"""
_snake_case : Any = """こんにちは、世界。 \nこんばんは、世界。"""
_snake_case : List[Any] = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def UpperCamelCase_ ( self : str) -> int:
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase_ ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase_ ( self : str) -> Any:
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase_ ( self : Optional[int]) -> int:
"""simple docstring"""
_snake_case : Union[str, Any] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""")
_snake_case : Dict = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""")
self.assertListEqual(
lowerCAmelCase , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])
def UpperCamelCase_ ( self : List[str]) -> int:
"""simple docstring"""
_snake_case : str = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
_snake_case : int = {}
for i, token in enumerate(lowerCAmelCase):
_snake_case : int = i
_snake_case : Optional[int] = CharacterTokenizer(vocab=lowerCAmelCase , unk_token="""[UNK]""")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""こんにちは""") , ["""こ""", """ん""", """に""", """ち""", """は"""])
self.assertListEqual(tokenizer.tokenize("""こんにちほ""") , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""])
def UpperCamelCase_ ( self : int) -> List[str]:
"""simple docstring"""
_snake_case : Optional[int] = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""")
_snake_case : List[str] = tokenizer.encode("""ありがとう。""" , add_special_tokens=lowerCAmelCase)
_snake_case : Optional[int] = tokenizer.encode("""どういたしまして。""" , add_special_tokens=lowerCAmelCase)
_snake_case : str = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase)
_snake_case : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : int) -> List[Any]:
"""simple docstring"""
_snake_case : List[str] = """cl-tohoku/bert-base-japanese"""
_snake_case : int = AutoTokenizer.from_pretrained(lowerCAmelCase)
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase)
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : str) -> Any:
"""simple docstring"""
_snake_case : str = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" , level="""WARNING""") as cm:
BertTokenizer.from_pretrained(lowerCAmelCase)
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from."""))
_snake_case : Any = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""") as cm:
BertJapaneseTokenizer.from_pretrained(lowerCAmelCase)
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from."""))
| 198 | 1 |
from __future__ import annotations
import os
from typing import Any
import requests
SCREAMING_SNAKE_CASE : Union[str, Any] = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
SCREAMING_SNAKE_CASE : List[Any] = BASE_URL + "/user"
# https://github.com/settings/tokens
SCREAMING_SNAKE_CASE : Any = os.environ.get("USER_TOKEN", "")
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : str ):
UpperCamelCase_ : Union[str, Any] = {
"""Authorization""": f'''token {auth_token}''',
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
| 635 | from __future__ import annotations
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 635 | 1 |
import random
def A ( __UpperCAmelCase ) -> bool:
'''simple docstring'''
UpperCAmelCase_ = num - 1
UpperCAmelCase_ = 0
while s % 2 == 0:
UpperCAmelCase_ = s // 2
t += 1
for _ in range(5 ):
UpperCAmelCase_ = random.randrange(2 , num - 1 )
UpperCAmelCase_ = pow(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if v != 1:
UpperCAmelCase_ = 0
while v != (num - 1):
if i == t - 1:
return False
else:
UpperCAmelCase_ = i + 1
UpperCAmelCase_ = (v**2) % num
return True
def A ( __UpperCAmelCase ) -> bool:
'''simple docstring'''
if num < 2:
return False
UpperCAmelCase_ = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(__UpperCAmelCase )
def A ( __UpperCAmelCase = 1024 ) -> int:
'''simple docstring'''
while True:
UpperCAmelCase_ = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(__UpperCAmelCase ):
return num
if __name__ == "__main__":
UpperCamelCase_ = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
| 561 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def A ( ) -> Optional[int]:
'''simple docstring'''
raise RuntimeError('''CUDA out of memory.''' )
class a_ ( nn.Module ):
def __init__( self :Dict) -> Any:
super().__init__()
UpperCAmelCase_ = nn.Linear(3 , 4)
UpperCAmelCase_ = nn.BatchNormad(4)
UpperCAmelCase_ = nn.Linear(4 , 5)
def __a ( self :str , _lowercase :int) -> str:
return self.lineara(self.batchnorm(self.lineara(_lowercase)))
class a_ ( unittest.TestCase ):
def __a ( self :Any) -> int:
UpperCAmelCase_ = []
@find_executable_batch_size(starting_batch_size=128)
def mock_training_loop_function(_lowercase :List[str]):
nonlocal batch_sizes
batch_sizes.append(_lowercase)
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(_lowercase , [128, 64, 32, 16, 8])
def __a ( self :Union[str, Any]) -> Union[str, Any]:
UpperCAmelCase_ = []
@find_executable_batch_size(starting_batch_size=128)
def mock_training_loop_function(_lowercase :Optional[int] , _lowercase :str):
nonlocal batch_sizes
batch_sizes.append(_lowercase)
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCAmelCase_ , UpperCAmelCase_ = mock_training_loop_function('''hello''')
self.assertListEqual(_lowercase , [128, 64, 32, 16, 8])
self.assertListEqual([bs, arga] , [8, '''hello'''])
def __a ( self :Optional[Any]) -> str:
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(_lowercase :Optional[Any]):
pass
with self.assertRaises(_lowercase) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def __a ( self :Any) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=16)
def mock_training_loop_function(_lowercase :Tuple):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(_lowercase) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def __a ( self :str) -> Dict:
@find_executable_batch_size(starting_batch_size=128)
def mock_training_loop_function(_lowercase :List[Any] , _lowercase :Union[str, Any] , _lowercase :Tuple):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(_lowercase) as cm:
mock_training_loop_function(128 , '''hello''' , '''world''')
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0])
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0])
def __a ( self :Optional[int]) -> Any:
@find_executable_batch_size(starting_batch_size=16)
def mock_training_loop_function(_lowercase :List[str]):
raise ValueError('''Oops, we had an error!''')
with self.assertRaises(_lowercase) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0])
@require_cuda
def __a ( self :List[Any]) -> Union[str, Any]:
UpperCAmelCase_ = torch.cuda.memory_allocated()
UpperCAmelCase_ = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , _lowercase)
UpperCAmelCase_ = release_memory(_lowercase)
self.assertEqual(torch.cuda.memory_allocated() , _lowercase)
| 561 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def __snake_case ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ) -> int:
'''simple docstring'''
_UpperCAmelCase : int = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("head" ):
_UpperCAmelCase : str = "segformer.encoder." + key
if key.startswith("backbone" ):
_UpperCAmelCase : List[str] = key.replace("backbone" , "segformer.encoder" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_UpperCAmelCase : List[Any] = key[key.find("patch_embed" ) + len("patch_embed" )]
_UpperCAmelCase : Tuple = key.replace(f'patch_embed{idx}' , f'patch_embeddings.{int(_snake_case )-1}' )
if "norm" in key:
_UpperCAmelCase : List[Any] = key.replace("norm" , "layer_norm" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_UpperCAmelCase : List[str] = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
_UpperCAmelCase : Dict = key.replace(f'layer_norm{idx}' , f'layer_norm.{int(_snake_case )-1}' )
if "layer_norm1" in key:
_UpperCAmelCase : int = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
_UpperCAmelCase : Dict = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
_UpperCAmelCase : Tuple = key[key.find("block" ) + len("block" )]
_UpperCAmelCase : Optional[Any] = key.replace(f'block{idx}' , f'block.{int(_snake_case )-1}' )
if "attn.q" in key:
_UpperCAmelCase : Tuple = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
_UpperCAmelCase : Any = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
_UpperCAmelCase : Dict = key.replace("attn" , "attention.self" )
if "fc1" in key:
_UpperCAmelCase : Tuple = key.replace("fc1" , "dense1" )
if "fc2" in key:
_UpperCAmelCase : Tuple = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
_UpperCAmelCase : Tuple = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
_UpperCAmelCase : Tuple = key.replace("linear_fuse.conv" , "linear_fuse" )
_UpperCAmelCase : Optional[Any] = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_UpperCAmelCase : int = key[key.find("linear_c" ) + len("linear_c" )]
_UpperCAmelCase : List[str] = key.replace(f'linear_c{idx}' , f'linear_c.{int(_snake_case )-1}' )
if key.startswith("head" ):
_UpperCAmelCase : List[str] = key.replace("head" , "classifier" )
_UpperCAmelCase : List[str] = value
return new_state_dict
def __snake_case ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_UpperCAmelCase : Optional[int] = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.weight' )
_UpperCAmelCase : Dict = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
_UpperCAmelCase : List[str] = kv_weight[
: config.hidden_sizes[i], :
]
_UpperCAmelCase : List[Any] = kv_bias[: config.hidden_sizes[i]]
_UpperCAmelCase : Union[str, Any] = kv_weight[
config.hidden_sizes[i] :, :
]
_UpperCAmelCase : Dict = kv_bias[
config.hidden_sizes[i] :
]
def __snake_case ( ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase : Union[str, Any] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
return image
@torch.no_grad()
def __snake_case ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = SegformerConfig()
_UpperCAmelCase : Tuple = False
# set attributes based on model_name
_UpperCAmelCase : Any = "huggingface/label-files"
if "segformer" in model_name:
_UpperCAmelCase : List[Any] = model_name[len("segformer." ) : len("segformer." ) + 2]
if "ade" in model_name:
_UpperCAmelCase : Optional[int] = 150
_UpperCAmelCase : Union[str, Any] = "ade20k-id2label.json"
_UpperCAmelCase : List[str] = (1, 150, 128, 128)
elif "city" in model_name:
_UpperCAmelCase : List[str] = 19
_UpperCAmelCase : str = "cityscapes-id2label.json"
_UpperCAmelCase : Tuple = (1, 19, 128, 128)
else:
raise ValueError(f'Model {model_name} not supported' )
elif "mit" in model_name:
_UpperCAmelCase : List[str] = True
_UpperCAmelCase : Tuple = model_name[4:6]
_UpperCAmelCase : Optional[int] = 1_000
_UpperCAmelCase : Dict = "imagenet-1k-id2label.json"
_UpperCAmelCase : Any = (1, 1_000)
else:
raise ValueError(f'Model {model_name} not supported' )
# set config attributes
_UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase : int = {int(_snake_case ): v for k, v in idalabel.items()}
_UpperCAmelCase : Tuple = idalabel
_UpperCAmelCase : List[str] = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
_UpperCAmelCase : List[Any] = [64, 128, 320, 512]
_UpperCAmelCase : Union[str, Any] = 256
elif size == "b2":
_UpperCAmelCase : Any = [64, 128, 320, 512]
_UpperCAmelCase : Optional[int] = 768
_UpperCAmelCase : Any = [3, 4, 6, 3]
elif size == "b3":
_UpperCAmelCase : Union[str, Any] = [64, 128, 320, 512]
_UpperCAmelCase : List[Any] = 768
_UpperCAmelCase : Any = [3, 4, 18, 3]
elif size == "b4":
_UpperCAmelCase : str = [64, 128, 320, 512]
_UpperCAmelCase : Optional[int] = 768
_UpperCAmelCase : List[Any] = [3, 8, 27, 3]
elif size == "b5":
_UpperCAmelCase : str = [64, 128, 320, 512]
_UpperCAmelCase : List[str] = 768
_UpperCAmelCase : Tuple = [3, 6, 40, 3]
else:
raise ValueError(f'Size {size} not supported' )
# load image processor (only resize + normalize)
_UpperCAmelCase : Optional[Any] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case )
# prepare image
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : Any = image_processor(images=_snake_case , return_tensors="pt" ).pixel_values
logger.info(f'Converting model {model_name}...' )
# load original state dict
if encoder_only:
_UpperCAmelCase : List[Any] = torch.load(_snake_case , map_location=torch.device("cpu" ) )
else:
_UpperCAmelCase : Dict = torch.load(_snake_case , map_location=torch.device("cpu" ) )["state_dict"]
# rename keys
_UpperCAmelCase : Optional[int] = rename_keys(_snake_case , encoder_only=_snake_case )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(_snake_case , _snake_case )
# create HuggingFace model and load state dict
if encoder_only:
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Optional[Any] = SegformerForImageClassification(_snake_case )
else:
_UpperCAmelCase : Optional[int] = SegformerForSemanticSegmentation(_snake_case )
model.load_state_dict(_snake_case )
model.eval()
# forward pass
_UpperCAmelCase : str = model(_snake_case )
_UpperCAmelCase : List[str] = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
_UpperCAmelCase : str = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
_UpperCAmelCase : Union[str, Any] = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
_UpperCAmelCase : Optional[int] = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
_UpperCAmelCase : str = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
_UpperCAmelCase : Dict = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
_UpperCAmelCase : Any = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
_UpperCAmelCase : Tuple = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
_UpperCAmelCase : List[Any] = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
_UpperCAmelCase : int = torch.tensor(
[
[
[-1.1372E01, -1.2787E01, -1.3477E01],
[-1.2536E01, -1.4194E01, -1.4409E01],
[-1.3217E01, -1.4888E01, -1.5327E01],
],
[
[-1.4791E01, -1.7122E01, -1.8277E01],
[-1.7163E01, -1.9192E01, -1.9533E01],
[-1.7897E01, -1.9991E01, -2.0315E01],
],
[
[7.6723E-01, 4.1921E-01, -7.7878E-02],
[4.7772E-01, 9.5557E-03, -2.8082E-01],
[3.6032E-01, -2.4826E-01, -5.1168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
_UpperCAmelCase : Optional[Any] = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
_UpperCAmelCase : Tuple = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
_UpperCAmelCase : str = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
_UpperCAmelCase : Tuple = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
_UpperCAmelCase : List[Any] = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
_UpperCAmelCase : Dict = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
else:
_UpperCAmelCase : Optional[int] = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , _snake_case , atol=1E-2 )
# finally, save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(_snake_case ).mkdir(exist_ok=_snake_case )
model.save_pretrained(_snake_case )
image_processor.save_pretrained(_snake_case )
if __name__ == "__main__":
_lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
_lowerCAmelCase : List[str] = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 289 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
snake_case : Optional[int] = get_logger(__name__)
snake_case : Union[str, Any] = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class _snake_case :
@add_start_docstrings(_a )
def __call__( self , _a , _a ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _snake_case :
@add_start_docstrings(_a )
def __call__( self , _a , _a ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _snake_case ( snake_case ):
@add_start_docstrings(_a )
def __call__( self , _a , _a , _a , **_a ):
for processor in self:
__magic_name__ : str = inspect.signature(processor.__call__ ).parameters
if len(_a ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
f'''{processor.__class__} are passed to the logits processor.''' )
__magic_name__ : Tuple = processor(_a , _a , _a , **_a )
else:
__magic_name__ : int = processor(_a , _a , _a )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a ):
if not isinstance(_a , _a ) or not (temperature > 0):
raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
__magic_name__ : str = temperature
def __call__( self , _a , _a , _a ):
__magic_name__ : List[Any] = scores / self.temperature
return scores
class _snake_case ( snake_case ):
def __init__( self , _a , _a = -float("Inf" ) , _a = 1 ):
if not isinstance(_a , _a ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(_a , _a ) or (min_tokens_to_keep < 1):
raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
__magic_name__ : List[str] = top_p
__magic_name__ : Optional[Any] = filter_value
__magic_name__ : Tuple = min_tokens_to_keep
def __call__( self , _a , _a , _a ):
__magic_name__ , __magic_name__ : str = lax.top_k(_a , scores.shape[-1] )
__magic_name__ : Dict = jnp.full_like(_a , self.filter_value )
__magic_name__ : Union[str, Any] = jax.nn.softmax(_a , axis=-1 ).cumsum(axis=-1 )
__magic_name__ : int = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
__magic_name__ : Tuple = jnp.roll(_a , 1 )
score_mask |= score_mask.at[:, 0].set(_a )
# min tokens to keep
__magic_name__ : int = score_mask.at[:, : self.min_tokens_to_keep].set(_a )
__magic_name__ : Any = jnp.where(_a , _a , _a )
__magic_name__ : Union[str, Any] = jax.lax.sort_key_val(_a , _a )[-1]
return next_scores
class _snake_case ( snake_case ):
def __init__( self , _a , _a = -float("Inf" ) , _a = 1 ):
if not isinstance(_a , _a ) or top_k <= 0:
raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
__magic_name__ : str = max(_a , _a )
__magic_name__ : Any = filter_value
def __call__( self , _a , _a , _a ):
__magic_name__ , __magic_name__ : Optional[Any] = scores.shape
__magic_name__ : Union[str, Any] = jnp.full(batch_size * vocab_size , self.filter_value )
__magic_name__ : Tuple = min(self.top_k , scores.shape[-1] ) # Safety check
__magic_name__ , __magic_name__ : str = lax.top_k(_a , _a )
__magic_name__ : Optional[int] = jnp.broadcast_to((jnp.arange(_a ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
__magic_name__ : int = topk_scores.flatten()
__magic_name__ : List[Any] = topk_indices.flatten() + shift
__magic_name__ : Optional[Any] = next_scores_flat.at[topk_indices_flat].set(_a )
__magic_name__ : List[Any] = next_scores_flat.reshape(_a , _a )
return next_scores
class _snake_case ( snake_case ):
def __init__( self , _a ):
__magic_name__ : Dict = bos_token_id
def __call__( self , _a , _a , _a ):
__magic_name__ : Optional[int] = jnp.full(scores.shape , -float("inf" ) )
__magic_name__ : Union[str, Any] = 1 - jnp.bool_(cur_len - 1 )
__magic_name__ : Dict = jnp.where(_a , new_scores.at[:, self.bos_token_id].set(0 ) , _a )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a , _a ):
__magic_name__ : Tuple = max_length
__magic_name__ : int = eos_token_id
def __call__( self , _a , _a , _a ):
__magic_name__ : Dict = jnp.full(scores.shape , -float("inf" ) )
__magic_name__ : Union[str, Any] = 1 - jnp.bool_(cur_len - self.max_length + 1 )
__magic_name__ : Dict = jnp.where(_a , new_scores.at[:, self.eos_token_id].set(0 ) , _a )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a , _a ):
if not isinstance(_a , _a ) or min_length < 0:
raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(_a , _a ) or eos_token_id < 0:
raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
__magic_name__ : int = min_length
__magic_name__ : Any = eos_token_id
def __call__( self , _a , _a , _a ):
# create boolean flag to decide if min length penalty should be applied
__magic_name__ : Optional[Any] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
__magic_name__ : List[Any] = jnp.where(_a , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , _a )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a , _a ):
__magic_name__ : Dict = list(_a )
__magic_name__ : Union[str, Any] = begin_index
def __call__( self , _a , _a , _a ):
__magic_name__ : Any = 1 - jnp.bool_(cur_len - self.begin_index )
__magic_name__ : Dict = jnp.where(_a , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , _a )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a ):
__magic_name__ : List[Any] = list(_a )
def __call__( self , _a , _a , _a ):
__magic_name__ : Union[str, Any] = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a ):
__magic_name__ : Tuple = dict(_a )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
__magic_name__ : Optional[int] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
__magic_name__ : Any = force_token_array.at[index].set(_a )
__magic_name__ : List[Any] = jnp.intaa(_a )
def __call__( self , _a , _a , _a ):
def _force_token(_a ):
__magic_name__ : Any = scores.shape[0]
__magic_name__ : int = self.force_token_array[generation_idx]
__magic_name__ : Union[str, Any] = jnp.ones_like(_a , dtype=scores.dtype ) * -float("inf" )
__magic_name__ : str = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
__magic_name__ : Any = lax.dynamic_update_slice(_a , _a , (0, current_token) )
return new_scores
__magic_name__ : Any = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_a ) , lambda: scores , ) , )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a , _a , _a ):
__magic_name__ : Optional[int] = generate_config.eos_token_id
__magic_name__ : List[Any] = generate_config.no_timestamps_token_id
__magic_name__ : Optional[Any] = generate_config.no_timestamps_token_id + 1
__magic_name__ : int = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_a , "max_initial_timestamp_index" ):
__magic_name__ : List[Any] = generate_config.max_initial_timestamp_index
else:
__magic_name__ : str = model_config.vocab_size
if self.max_initial_timestamp_index is None:
__magic_name__ : str = model_config.vocab_size
def __call__( self , _a , _a , _a ):
# suppress <|notimestamps|> which is handled by without_timestamps
__magic_name__ : List[str] = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(_a , _a ):
__magic_name__ : int = jnp.where((cur_len - self.begin_index) >= 1 , _a , _a )
__magic_name__ : str = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _a , )
__magic_name__ : Dict = jnp.where((cur_len - self.begin_index) < 2 , _a , _a )
__magic_name__ : Any = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _a , _a , )
return jnp.where(
_a , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , _a , )
__magic_name__ : Any = jax.vmap(_a )(_a , _a )
__magic_name__ : Dict = jnp.where(cur_len == self.begin_index , _a , _a )
__magic_name__ : List[str] = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _a , )
__magic_name__ : Union[str, Any] = self.timestamp_begin + self.max_initial_timestamp_index
__magic_name__ : Tuple = jnp.where(
_a , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , _a , )
# if sum of probability over timestamps is above any other token, sample timestamp
__magic_name__ : Union[str, Any] = jax.nn.log_softmax(_a , axis=-1 )
def handle_cumulative_probs(_a , _a ):
__magic_name__ : int = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
__magic_name__ : int = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , _a , )
__magic_name__ : int = jax.vmap(_a )(_a , _a )
return scores
| 124 | 0 |
'''simple docstring'''
import argparse
lowerCAmelCase_ = 'docs/source/_static/js/custom.js'
def A__ ( A : Dict):
'''simple docstring'''
with open(A , encoding="utf-8" , newline="\n") as f:
UpperCamelCase : Union[str, Any] = f.readlines()
UpperCamelCase : Optional[int] = 0
# First let's put the right version
while not lines[index].startswith("const stableVersion ="):
index += 1
UpperCamelCase : Any = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith("const versionMapping = {"):
index += 1
# We go until the end
while not lines[index].startswith("}"):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(A , "w" , encoding="utf-8" , newline="\n") as f:
f.writelines(A)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--version', help='Release version.')
lowerCAmelCase_ = parser.parse_args()
update_custom_js(args.version)
| 435 |
'''simple docstring'''
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCamelCase ) -> Dict:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = arr.split("," )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCamelCase : Optional[Any] = [int(self.array[0] )] * len(self.array )
UpperCamelCase : int = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
UpperCamelCase : Tuple = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
UpperCamelCase : Optional[Any] = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
lowerCAmelCase_ = input('please input some numbers:')
lowerCAmelCase_ = SubArray(whole_array)
lowerCAmelCase_ = array.solve_sub_array()
print(('the results is:', re))
| 435 | 1 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :str=10 ) -> Optional[Any]:
_A = []
for _ in range(_snake_case ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :Dict=10 ) -> Optional[int]:
_A = []
for step in range(_snake_case ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
_A = os.path.join(_snake_case , '''schedule.bin''' )
torch.save(scheduler.state_dict() , _snake_case )
_A = torch.load(_snake_case )
scheduler.load_state_dict(_snake_case )
return lrs
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ) -> Any:
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
for a, b in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertAlmostEqual(__lowerCAmelCase , __lowerCAmelCase , delta=__lowerCAmelCase )
def snake_case_ ( self : int ) -> Union[str, Any]:
_A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__lowerCAmelCase )
_A = torch.tensor([0.4, 0.2, -0.5] )
_A = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_A = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(1_00 ):
_A = criterion(__lowerCAmelCase , __lowerCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def snake_case_ ( self : int ) -> Union[str, Any]:
_A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__lowerCAmelCase )
_A = torch.tensor([0.4, 0.2, -0.5] )
_A = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_A = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=__lowerCAmelCase , weight_decay=0.0 , relative_step=__lowerCAmelCase , scale_parameter=__lowerCAmelCase , warmup_init=__lowerCAmelCase , )
for _ in range(10_00 ):
_A = criterion(__lowerCAmelCase , __lowerCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
a__ : Optional[Any] = nn.Linear(50 , 50) if is_torch_available() else None
a__ : Dict = AdamW(m.parameters() , lr=10.0) if is_torch_available() else None
a__ : List[Any] = 10
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any]=None ) -> List[Any]:
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
for a, b in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertAlmostEqual(__lowerCAmelCase , __lowerCAmelCase , delta=__lowerCAmelCase , msg=__lowerCAmelCase )
def snake_case_ ( self : Any ) -> str:
_A = {'''num_warmup_steps''': 2, '''num_training_steps''': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
_A = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_A , _A = data
_A = scheduler_func(self.optimizer , **__lowerCAmelCase )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_A = unwrap_schedule(__lowerCAmelCase , self.num_steps )
self.assertListAlmostEqual(
__lowerCAmelCase , __lowerCAmelCase , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
_A = scheduler_func(self.optimizer , **__lowerCAmelCase )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(__lowerCAmelCase ) # wrap to test picklability of the schedule
_A = unwrap_and_save_reload_schedule(__lowerCAmelCase , self.num_steps )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase , msg=f'''failed for {scheduler_func} in save and reload''' )
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : List[Any] , __lowerCAmelCase : Any ) -> List[Any]:
_A = fn
def __call__( self : Union[str, Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : List[str] ) -> Dict:
return self.fn(*__lowerCAmelCase , **__lowerCAmelCase )
@classmethod
def snake_case_ ( self : Any , __lowerCAmelCase : Optional[Any] ) -> List[str]:
_A = list(map(self , scheduler.lr_lambdas ) )
| 2 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class A_ ( __a , __a , unittest.TestCase ):
_A :List[Any] = VQModel
_A :Any = '''sample'''
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case__ : int=(32, 32) ):
lowercase = 4
lowercase = 3
lowercase = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case__ )
return {"sample": image}
@property
def SCREAMING_SNAKE_CASE__ ( self : str ):
return (3, 32, 32)
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
return (3, 32, 32)
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowercase = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 3,
}
lowercase = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : int ):
pass
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowercase , lowercase = VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(snake_case__ )
lowercase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowercase = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
model.to(snake_case__ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
lowercase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
lowercase = image.to(snake_case__ )
with torch.no_grad():
lowercase = model(snake_case__ ).sample
lowercase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowercase = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] )
# fmt: on
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
| 428 | 0 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 319 |
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : str ,**_a : List[Any] ):
'''simple docstring'''
requires_backends(self ,['bs4'] )
super().__init__(**_a )
def __lowercase ( self : Tuple ,_a : List[str] ):
'''simple docstring'''
_a : List[str] = []
_a : Any = []
_a : int = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
_a : Dict = parent.find_all(child.name ,recursive=_a )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_a ) else next(i for i, s in enumerate(_a ,1 ) if s is child ) )
_a : Optional[Any] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def __lowercase ( self : int ,_a : Any ):
'''simple docstring'''
_a : List[str] = BeautifulSoup(_a ,'html.parser' )
_a : int = []
_a : int = []
_a : Optional[Any] = []
for element in html_code.descendants:
if type(_a ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
_a : Tuple = html.unescape(_a ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_a )
_a, _a : Tuple = self.xpath_soup(_a )
stringaxtag_seq.append(_a )
stringaxsubs_seq.append(_a )
if len(_a ) != len(_a ):
raise ValueError('Number of doc strings and xtags does not correspond' )
if len(_a ) != len(_a ):
raise ValueError('Number of doc strings and xsubs does not correspond' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def __lowercase ( self : List[Any] ,_a : List[str] ,_a : Any ):
'''simple docstring'''
_a : List[str] = ''
for tagname, subs in zip(_a ,_a ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__( self : str ,_a : Tuple ):
'''simple docstring'''
_a : Optional[int] = False
# Check that strings has a valid type
if isinstance(_a ,_a ):
_a : Optional[int] = True
elif isinstance(_a ,(list, tuple) ):
if len(_a ) == 0 or isinstance(html_strings[0] ,_a ):
_a : int = True
if not valid_strings:
raise ValueError(
'HTML strings must of type `str`, `List[str]` (batch of examples), '
F"""but is of type {type(_a )}.""" )
_a : List[str] = bool(isinstance(_a ,(list, tuple) ) and (isinstance(html_strings[0] ,_a )) )
if not is_batched:
_a : Optional[int] = [html_strings]
# Get nodes + xpaths
_a : Union[str, Any] = []
_a : int = []
for html_string in html_strings:
_a, _a, _a : Any = self.get_three_from_single(_a )
nodes.append(_a )
_a : Tuple = []
for node, tag_list, sub_list in zip(_a ,_a ,_a ):
_a : Dict = self.construct_xpath(_a ,_a )
xpath_strings.append(_a )
xpaths.append(_a )
# return as Dict
_a : Optional[Any] = {'nodes': nodes, 'xpaths': xpaths}
_a : Tuple = BatchFeature(data=_a ,tensor_type=_a )
return encoded_inputs
| 319 | 1 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger()
@dataclass
class SCREAMING_SNAKE_CASE__ :
_UpperCAmelCase =42
_UpperCAmelCase =field(default_factory=lowercase_ )
_UpperCAmelCase =field(default_factory=lowercase_ )
def _lowerCAmelCase ( self: Optional[int] , a: List[Any] , a: Tensor , a: Tensor) ->Tuple:
'''simple docstring'''
a_ = len(list(m.modules())) == 1 or isinstance(a , nn.Convad) or isinstance(a , nn.BatchNormad)
if has_not_submodules:
self.traced.append(a)
def __call__( self: str , a: Tensor) ->Dict:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(a)
[x.remove() for x in self.handles]
return self
@property
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
return list(filter(lambda a: len(list(x.state_dict().keys())) > 0 , self.traced))
@dataclass
class SCREAMING_SNAKE_CASE__ :
_UpperCAmelCase =42
_UpperCAmelCase =42
_UpperCAmelCase =0
_UpperCAmelCase =field(default_factory=lowercase_ )
_UpperCAmelCase =field(default_factory=lowercase_ )
def __call__( self: List[str] , a: Tensor) ->int:
'''simple docstring'''
a_ = Tracker(self.dest)(a).parametrized
a_ = Tracker(self.src)(a).parametrized
a_ = list(filter(lambda a: type(a) not in self.src_skip , a))
a_ = list(filter(lambda a: type(a) not in self.dest_skip , a))
if len(a) != len(a):
raise Exception(
f"""Numbers of operations are different. Source module has {len(a)} operations while"""
f""" destination module has {len(a)}.""")
for dest_m, src_m in zip(a , a):
dest_m.load_state_dict(src_m.state_dict())
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""")
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = True ) -> Union[str, Any]:
'''simple docstring'''
print(F"""Converting {name}...""" )
with torch.no_grad():
a_ = timm.create_model(lowercase__ ,pretrained=lowercase__ ).eval()
a_ = ResNetForImageClassification(lowercase__ ).eval()
a_ = ModuleTransfer(src=lowercase__ ,dest=lowercase__ )
a_ = torch.randn((1, 3, 224, 224) )
module_transfer(lowercase__ )
assert torch.allclose(from_model(lowercase__ ) ,our_model(lowercase__ ).logits ), "The model logits don't match the original one."
a_ = F"""resnet{'-'.join(name.split('resnet' ) )}"""
print(lowercase__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add model" ,use_temp_dir=lowercase__ ,)
# we can use the convnext one
a_ = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add image processor" ,use_temp_dir=lowercase__ ,)
print(F"""Pushed {checkpoint_name}""" )
def __UpperCAmelCase (lowercase__ ,lowercase__ = None ,lowercase__ = True ) -> int:
'''simple docstring'''
a_ = "imagenet-1k-id2label.json"
a_ = 1000
a_ = (1, num_labels)
a_ = "huggingface/label-files"
a_ = num_labels
a_ = json.load(open(hf_hub_download(lowercase__ ,lowercase__ ,repo_type="dataset" ) ,"r" ) )
a_ = {int(lowercase__ ): v for k, v in idalabel.items()}
a_ = idalabel
a_ = {v: k for k, v in idalabel.items()}
a_ = partial(lowercase__ ,num_labels=lowercase__ ,idalabel=lowercase__ ,labelaid=lowercase__ )
a_ = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(lowercase__ ,names_to_config[model_name] ,lowercase__ ,lowercase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
return config, expected_shape
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
a_ = parser.parse_args()
a_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 685 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''visual_bert'''
def __init__( self: Union[str, Any] , a: List[Any]=3_05_22 , a: List[Any]=7_68 , a: Union[str, Any]=5_12 , a: List[str]=12 , a: Tuple=12 , a: Optional[Any]=30_72 , a: int="gelu" , a: Union[str, Any]=0.1 , a: int=0.1 , a: str=5_12 , a: Optional[int]=2 , a: List[str]=0.02 , a: Optional[int]=1e-12 , a: str=False , a: Any=True , a: Tuple=1 , a: Dict=0 , a: Any=2 , **a: Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a)
a_ = vocab_size
a_ = max_position_embeddings
a_ = hidden_size
a_ = visual_embedding_dim
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = type_vocab_size
a_ = layer_norm_eps
a_ = bypass_transformer
a_ = special_visual_initialize
| 685 | 1 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(_lowerCamelCase )
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
def __init__( self , **_A ):
'''simple docstring'''
super().__init__(**_A )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , _A , **_A ):
'''simple docstring'''
return super().__call__(_A , **_A )
def UpperCamelCase_ ( self , **_A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE ={}
if "candidate_labels" in kwargs:
_SCREAMING_SNAKE_CASE =kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
_SCREAMING_SNAKE_CASE =kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCamelCase_ ( self , _A , _A=None , _A="This is a photo of {}." ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =load_image(_A )
_SCREAMING_SNAKE_CASE =self.image_processor(images=[image] , return_tensors=self.framework )
_SCREAMING_SNAKE_CASE =candidate_labels
_SCREAMING_SNAKE_CASE =[hypothesis_template.format(_A ) for x in candidate_labels]
_SCREAMING_SNAKE_CASE =self.tokenizer(_A , return_tensors=self.framework , padding=_A )
_SCREAMING_SNAKE_CASE =[text_inputs]
return inputs
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =model_inputs.pop('''candidate_labels''' )
_SCREAMING_SNAKE_CASE =model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , _A ):
_SCREAMING_SNAKE_CASE =text_inputs[0]
else:
# Batching case.
_SCREAMING_SNAKE_CASE =text_inputs[0][0]
_SCREAMING_SNAKE_CASE =self.model(**_A , **_A )
_SCREAMING_SNAKE_CASE ={
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =model_outputs.pop('''candidate_labels''' )
_SCREAMING_SNAKE_CASE =model_outputs['''logits'''][0]
if self.framework == "pt":
_SCREAMING_SNAKE_CASE =logits.softmax(dim=-1 ).squeeze(-1 )
_SCREAMING_SNAKE_CASE =probs.tolist()
if not isinstance(_A , _A ):
_SCREAMING_SNAKE_CASE =[scores]
elif self.framework == "tf":
_SCREAMING_SNAKE_CASE =stable_softmax(_A , axis=-1 )
_SCREAMING_SNAKE_CASE =probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
_SCREAMING_SNAKE_CASE =[
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(_A , _A ) , key=lambda _A : -x[0] )
]
return result
| 715 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
lowercase : Any = "owlvit_text_model"
def __init__( self , _A=4_9_4_0_8 , _A=5_1_2 , _A=2_0_4_8 , _A=1_2 , _A=8 , _A=1_6 , _A="quick_gelu" , _A=1E-5 , _A=0.0 , _A=0.02 , _A=1.0 , _A=0 , _A=4_9_4_0_6 , _A=4_9_4_0_7 , **_A , ):
'''simple docstring'''
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
_SCREAMING_SNAKE_CASE =vocab_size
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =max_position_embeddings
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =layer_norm_eps
_SCREAMING_SNAKE_CASE =attention_dropout
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =initializer_factor
@classmethod
def UpperCamelCase_ ( cls , _A , **_A ):
'''simple docstring'''
cls._set_token_in_kwargs(_A )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =cls.get_config_dict(_A , **_A )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
_SCREAMING_SNAKE_CASE =config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
lowercase : Tuple = "owlvit_vision_model"
def __init__( self , _A=7_6_8 , _A=3_0_7_2 , _A=1_2 , _A=1_2 , _A=3 , _A=7_6_8 , _A=3_2 , _A="quick_gelu" , _A=1E-5 , _A=0.0 , _A=0.02 , _A=1.0 , **_A , ):
'''simple docstring'''
super().__init__(**_A )
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =patch_size
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =layer_norm_eps
_SCREAMING_SNAKE_CASE =attention_dropout
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =initializer_factor
@classmethod
def UpperCamelCase_ ( cls , _A , **_A ):
'''simple docstring'''
cls._set_token_in_kwargs(_A )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =cls.get_config_dict(_A , **_A )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
_SCREAMING_SNAKE_CASE =config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
lowercase : Optional[int] = "owlvit"
lowercase : List[Any] = True
def __init__( self , _A=None , _A=None , _A=5_1_2 , _A=2.6592 , _A=True , **_A , ):
'''simple docstring'''
super().__init__(**_A )
if text_config is None:
_SCREAMING_SNAKE_CASE ={}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
_SCREAMING_SNAKE_CASE ={}
logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
_SCREAMING_SNAKE_CASE =OwlViTTextConfig(**_A )
_SCREAMING_SNAKE_CASE =OwlViTVisionConfig(**_A )
_SCREAMING_SNAKE_CASE =projection_dim
_SCREAMING_SNAKE_CASE =logit_scale_init_value
_SCREAMING_SNAKE_CASE =return_dict
_SCREAMING_SNAKE_CASE =1.0
@classmethod
def UpperCamelCase_ ( cls , _A , **_A ):
'''simple docstring'''
cls._set_token_in_kwargs(_A )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =cls.get_config_dict(_A , **_A )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
@classmethod
def UpperCamelCase_ ( cls , _A , _A , **_A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =text_config
_SCREAMING_SNAKE_CASE =vision_config
return cls.from_dict(_A , **_A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE =self.text_config.to_dict()
_SCREAMING_SNAKE_CASE =self.vision_config.to_dict()
_SCREAMING_SNAKE_CASE =self.__class__.model_type
return output
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return 1E-4
def UpperCamelCase_ ( self , _A , _A = -1 , _A = -1 , _A = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =super().generate_dummy_inputs(
processor.tokenizer , batch_size=_A , seq_length=_A , framework=_A )
_SCREAMING_SNAKE_CASE =super().generate_dummy_inputs(
processor.image_processor , batch_size=_A , framework=_A )
return {**text_input_dict, **image_input_dict}
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return 1_4
| 165 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class a__ ( UpperCamelCase__ ):
a : torch.FloatTensor
class a__ ( nn.Module ):
def __init__( self , A=3 , A=3 , A=("DownEncoderBlock2D",) , A=(64,) , A=2 , A=32 , A="silu" , A=True , ) -> str:
'''simple docstring'''
super().__init__()
a = layers_per_block
a = torch.nn.Convad(
A , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
a = None
a = nn.ModuleList([] )
# down
a = block_out_channels[0]
for i, down_block_type in enumerate(A ):
a = output_channel
a = block_out_channels[i]
a = i == len(A ) - 1
a = get_down_block(
A , num_layers=self.layers_per_block , in_channels=A , out_channels=A , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=A , resnet_groups=A , attention_head_dim=A , temb_channels=A , )
self.down_blocks.append(A )
# mid
a = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=A , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=A , temb_channels=A , )
# out
a = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=A , eps=1e-6 )
a = nn.SiLU()
a = 2 * out_channels if double_z else out_channels
a = nn.Convad(block_out_channels[-1] , A , 3 , padding=1 )
a = False
def lowerCAmelCase_ ( self , A ) -> Tuple:
'''simple docstring'''
a = x
a = self.conv_in(A )
if self.training and self.gradient_checkpointing:
def create_custom_forward(A ):
def custom_forward(*A ):
return module(*A )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
a = torch.utils.checkpoint.checkpoint(
create_custom_forward(A ) , A , use_reentrant=A )
# middle
a = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , A , use_reentrant=A )
else:
for down_block in self.down_blocks:
a = torch.utils.checkpoint.checkpoint(create_custom_forward(A ) , A )
# middle
a = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , A )
else:
# down
for down_block in self.down_blocks:
a = down_block(A )
# middle
a = self.mid_block(A )
# post-process
a = self.conv_norm_out(A )
a = self.conv_act(A )
a = self.conv_out(A )
return sample
class a__ ( nn.Module ):
def __init__( self , A=3 , A=3 , A=("UpDecoderBlock2D",) , A=(64,) , A=2 , A=32 , A="silu" , A="group" , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
a = layers_per_block
a = nn.Convad(
A , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
a = None
a = nn.ModuleList([] )
a = in_channels if norm_type == "spatial" else None
# mid
a = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=A , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=A , temb_channels=A , )
# up
a = list(reversed(A ) )
a = reversed_block_out_channels[0]
for i, up_block_type in enumerate(A ):
a = output_channel
a = reversed_block_out_channels[i]
a = i == len(A ) - 1
a = get_up_block(
A , num_layers=self.layers_per_block + 1 , in_channels=A , out_channels=A , prev_output_channel=A , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=A , resnet_groups=A , attention_head_dim=A , temb_channels=A , resnet_time_scale_shift=A , )
self.up_blocks.append(A )
a = output_channel
# out
if norm_type == "spatial":
a = SpatialNorm(block_out_channels[0] , A )
else:
a = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=A , eps=1e-6 )
a = nn.SiLU()
a = nn.Convad(block_out_channels[0] , A , 3 , padding=1 )
a = False
def lowerCAmelCase_ ( self , A , A=None ) -> Any:
'''simple docstring'''
a = z
a = self.conv_in(A )
a = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(A ):
def custom_forward(*A ):
return module(*A )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
a = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , A , A , use_reentrant=A )
a = sample.to(A )
# up
for up_block in self.up_blocks:
a = torch.utils.checkpoint.checkpoint(
create_custom_forward(A ) , A , A , use_reentrant=A )
else:
# middle
a = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , A , A )
a = sample.to(A )
# up
for up_block in self.up_blocks:
a = torch.utils.checkpoint.checkpoint(create_custom_forward(A ) , A , A )
else:
# middle
a = self.mid_block(A , A )
a = sample.to(A )
# up
for up_block in self.up_blocks:
a = up_block(A , A )
# post-process
if latent_embeds is None:
a = self.conv_norm_out(A )
else:
a = self.conv_norm_out(A , A )
a = self.conv_act(A )
a = self.conv_out(A )
return sample
class a__ ( nn.Module ):
def __init__( self , A , A , A , A=None , A="random" , A=False , A=True ) -> Tuple:
'''simple docstring'''
super().__init__()
a = n_e
a = vq_embed_dim
a = beta
a = legacy
a = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
a = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
a = self.used.shape[0]
a = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
a = self.re_embed
a = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
a = n_e
a = sane_index_shape
def lowerCAmelCase_ ( self , A ) -> Any:
'''simple docstring'''
a = inds.shape
assert len(A ) > 1
a = inds.reshape(ishape[0] , -1 )
a = self.used.to(A )
a = (inds[:, :, None] == used[None, None, ...]).long()
a = match.argmax(-1 )
a = match.sum(2 ) < 1
if self.unknown_index == "random":
a = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
a = self.unknown_index
return new.reshape(A )
def lowerCAmelCase_ ( self , A ) -> Union[str, Any]:
'''simple docstring'''
a = inds.shape
assert len(A ) > 1
a = inds.reshape(ishape[0] , -1 )
a = self.used.to(A )
if self.re_embed > self.used.shape[0]: # extra token
a = 0 # simply set to zero
a = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , A )
return back.reshape(A )
def lowerCAmelCase_ ( self , A ) -> List[str]:
'''simple docstring'''
a = z.permute(0 , 2 , 3 , 1 ).contiguous()
a = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
a = torch.argmin(torch.cdist(A , self.embedding.weight ) , dim=1 )
a = self.embedding(A ).view(z.shape )
a = None
a = None
# compute loss for embedding
if not self.legacy:
a = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
a = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
a = z + (z_q - z).detach()
# reshape back to match original input shape
a = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
a = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
a = self.remap_to_used(A )
a = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
a = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCAmelCase_ ( self , A , A ) -> Optional[Any]:
'''simple docstring'''
if self.remap is not None:
a = indices.reshape(shape[0] , -1 ) # add batch axis
a = self.unmap_to_all(A )
a = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
a = self.embedding(A )
if shape is not None:
a = z_q.view(A )
# reshape back to match original input shape
a = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class a__ ( UpperCamelCase__ ):
def __init__( self , A , A=False ) -> Tuple:
'''simple docstring'''
a = parameters
a , a = torch.chunk(A , 2 , dim=1 )
a = torch.clamp(self.logvar , -3_0.0 , 2_0.0 )
a = deterministic
a = torch.exp(0.5 * self.logvar )
a = torch.exp(self.logvar )
if self.deterministic:
a = a = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCAmelCase_ ( self , A = None ) -> torch.FloatTensor:
'''simple docstring'''
a = randn_tensor(
self.mean.shape , generator=A , device=self.parameters.device , dtype=self.parameters.dtype )
a = self.mean + self.std * sample
return x
def lowerCAmelCase_ ( self , A=None ) -> List[Any]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCAmelCase_ ( self , A , A=[1, 2, 3] ) -> str:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
a = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=A )
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
return self.mean
| 515 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : Any = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class a__ ( UpperCamelCase__ ):
a : int = """audio-spectrogram-transformer"""
def __init__( self , A=768 , A=12 , A=12 , A=3072 , A="gelu" , A=0.0 , A=0.0 , A=0.0_2 , A=1e-12 , A=16 , A=True , A=10 , A=10 , A=1024 , A=128 , **A , ) -> Any:
'''simple docstring'''
super().__init__(**A )
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = initializer_range
a = layer_norm_eps
a = patch_size
a = qkv_bias
a = frequency_stride
a = time_stride
a = max_length
a = num_mel_bins
| 515 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Dict = logging.get_logger(__name__)
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Optional[int] = "timm_backbone"
def __init__( self :Any , SCREAMING_SNAKE_CASE :Optional[Any]=None , SCREAMING_SNAKE_CASE :List[str]=3 , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :int=None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
_a : Tuple =backbone
_a : Dict =num_channels
_a : int =features_only
_a : str =use_pretrained_backbone
_a : str =True
_a : str =out_indices if out_indices is not None else (-1,)
| 700 |
'''simple docstring'''
from functools import lru_cache
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> set:
_a : Any =2
_a : Tuple =set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(_UpperCAmelCase )
if n > 1:
factors.add(_UpperCAmelCase )
return factors
@lru_cache
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int:
return len(unique_prime_factors(_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list ) -> bool:
return len(set(_UpperCAmelCase ) ) in (0, 1)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> list:
_a : int =2
while True:
# Increment each value of a generated range
_a : str =[base + i for i in range(_UpperCAmelCase )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
_a : List[Any] =[upf_len(_UpperCAmelCase ) for x in group]
checker.append(_UpperCAmelCase )
# If all numbers in the list are equal, return the group variable.
if equality(_UpperCAmelCase ):
return group
# Increment our base variable by 1
base += 1
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 4 ) -> int:
_a : Optional[int] =run(_UpperCAmelCase )
return results[0] if len(_UpperCAmelCase ) else None
if __name__ == "__main__":
print(solution())
| 506 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.