# Convert BiT checkpoints from the timm library to the Hugging Face Transformers format.
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    # Build a BitConfig with ImageNet-1k label mappings pulled from the Hub.
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config


def rename_key(name):
    # Map timm parameter names onto the Transformers BiT module hierarchy.
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name


def prepare_img():
    # We verify the conversion on the standard COCO image of two cats.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model and rename keys
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor mirroring the timm preprocessing pipeline
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
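# Illustrative usage (not part of the original script; the output directory is a
# placeholder): the conversion can also be driven programmatically instead of
# through the CLI defined above.
#
# convert_bit_checkpoint("resnetv2_50x1_bitm", "./bit-50", push_to_hub=False)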
# `datasets` metric wrapping scipy.stats.pearsonr.
import datasets
from scipy.stats import pearsonr


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""

_CITATION = """
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
             Haberland, Matt and Reddy, Tyler and Cournapeau, David and
             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
             Kern, Robert and Larson, Eric and Carey, C J and
             Polat, Ilhan and Feng, Yu and Moore, Eric W. and
             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
             Harris, Charles R. and Archibald, Anne M. and
             Ribeiro, Antonio H. and Pedregosa, Fabian and
             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
             Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
# Lazy-import structure for the BLIP-2 model family.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Quine-McCluskey minimization: compute the prime implicants and essential
# prime implicants of a boolean function given its minterms.
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two implicants if they differ in at most one position, else return False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge implicants; the ones that never merge are prime."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # if the two implicants merge, mark both as combined and keep
                # the merged pattern for the next round
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """True if the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select the essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    # a minterm column covered by exactly one row marks that row as essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily pick the implicant covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    # minterms are non-negative integers
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
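# Illustrative sanity check (not in the original file): for 3 variables and
# minterms {0, 1, 2, 5}, the prime implicants are {'00_', '0_0', '_01'} and the
# essential prime implicants are {'0_0', '_01'}, up to ordering (check() uses a
# set internally, so the output order is not deterministic).
#
# binary = decimal_to_binary(3, [0, 1, 2, 5])  # ['000', '001', '010', '101']
# prime_implicants = check(binary)
# chart = prime_implicant_chart(prime_implicants, binary)
# print(selection(chart, prime_implicants))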
# Configuration classes for the BEiT model, plus its ONNX export configuration.
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
# Project Euler problem 92: count how many starting numbers below ten million
# arrive at 89 under repeated digit-square summation.
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    """Returns the next number in the chain of digit-square sums."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared


# There are 2 chains made:
# One ends with 89, with the chain member 58 being the one which, when declared
# first, gives the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element, 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution.
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain of 1 ends in 1
CHAINS[57] = False  # the chain of 58 ends in 89


def chain(number: int) -> bool:
    """Returns True if the chain of `number` ends in 1, False if it ends in 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # numbers of the form d * 10^k share the chain value of d
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """Counts the numbers below `number` whose chains arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
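# Worked example (illustrative): 44 -> 4^2 + 4^2 = 32 -> 13 -> 10 -> 1, so 44
# belongs to the chain ending in 1, while 85 -> 8^2 + 5^2 = 89 belongs to the
# chain ending in 89.
#
# assert next_number(44) == 32
# assert chain(44) is True and chain(85) is False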
# Circular (cyclic) convolution of two discrete signals via the matrix method.
# https://en.wikipedia.org/wiki/Circular_convolution
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """
    This class stores the two signals and performs the circular convolution.
    """

    def __init__(self) -> None:
        # First signal and second signal are stored as 1-D arrays
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        """
        Performs the circular convolution of the first and second signal
        using the matrix method.

        >>> CircularConvolution().circular_convolution()
        [10, 10, 6, 14]
        """
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix holds the second signal rotated by the row index
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
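# Illustrative usage (mirrors the class defaults): circularly convolving
# [2, 1, 2, -1] with [1, 2, 3, 4] yields [10, 10, 6, 14].
#
# convolution = CircularConvolution()
# print(convolution.circular_convolution())  # [10, 10, 6, 14]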
# Lazy-import structure for the BERT model family (configuration, tokenizers,
# and the PyTorch, TensorFlow, and Flax model implementations).
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tensorflow_text_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bert"] = [
        "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BertForMaskedLM",
        "BertForMultipleChoice",
        "BertForNextSentencePrediction",
        "BertForPreTraining",
        "BertForQuestionAnswering",
        "BertForSequenceClassification",
        "BertForTokenClassification",
        "BertLayer",
        "BertLMHeadModel",
        "BertModel",
        "BertPreTrainedModel",
        "load_tf_weights_in_bert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_bert"] = [
        "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBertEmbeddings",
        "TFBertForMaskedLM",
        "TFBertForMultipleChoice",
        "TFBertForNextSentencePrediction",
        "TFBertForPreTraining",
        "TFBertForQuestionAnswering",
        "TFBertForSequenceClassification",
        "TFBertForTokenClassification",
        "TFBertLMHeadModel",
        "TFBertMainLayer",
        "TFBertModel",
        "TFBertPreTrainedModel",
    ]

try:
    if not is_tensorflow_text_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_bert"] = [
        "FlaxBertForCausalLM",
        "FlaxBertForMaskedLM",
        "FlaxBertForMultipleChoice",
        "FlaxBertForNextSentencePrediction",
        "FlaxBertForPreTraining",
        "FlaxBertForQuestionAnswering",
        "FlaxBertForSequenceClassification",
        "FlaxBertForTokenClassification",
        "FlaxBertModel",
        "FlaxBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
    from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_fast import BertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bert import (
            BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
            BertLayer,
            BertLMHeadModel,
            BertModel,
            BertPreTrainedModel,
            load_tf_weights_in_bert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_bert import (
            TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBertEmbeddings,
            TFBertForMaskedLM,
            TFBertForMultipleChoice,
            TFBertForNextSentencePrediction,
            TFBertForPreTraining,
            TFBertForQuestionAnswering,
            TFBertForSequenceClassification,
            TFBertForTokenClassification,
            TFBertLMHeadModel,
            TFBertMainLayer,
            TFBertModel,
            TFBertPreTrainedModel,
        )

    try:
        if not is_tensorflow_text_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_tf import TFBertTokenizer

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_bert import (
            FlaxBertForCausalLM,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForNextSentencePrediction,
            FlaxBertForPreTraining,
            FlaxBertForQuestionAnswering,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertModel,
            FlaxBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Tests for BridgeTowerImageProcessor (resizing, rescaling, normalization).
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width for a given input image,
        mirroring the shortest-edge resize (capped at 1333/800 * size) and the
        size_divisor rounding applied by the image processor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
# Simulated annealing over a 2-D search problem defined in hill_climbing.py.
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperature: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperature
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
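# Illustrative check of the acceptance rule above (not in the original file): a
# worsening move with change = -2 is accepted with probability e^(-2/100) ~ 0.98
# at temperature 100, but only e^(-2/1) ~ 0.14 at temperature 1, so exploration
# shrinks as the system cools.
#
# import math
# print(math.e ** (-2 / 100), math.e ** (-2 / 1))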
# Tests for the legacy seq2seq datasets: truncation, packing, and samplers.
import os
from pathlib import Path

import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader

from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset


BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir


class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]

            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")

    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
# Lazy-import structure for the NEZHA model family.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Project Euler problem 15: count lattice paths through an n x n grid, which is
# the central binomial coefficient C(2n, n).
from math import factorial


def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
# Slow integration test for the VersatileDiffusion image-variation pipeline.
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
# Lint as: python3
# `datasets` formatter that converts Arrow data to jax.numpy arrays.
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
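# Illustrative usage (not part of this module): this formatter is what backs
# `Dataset.with_format("jax")` in the `datasets` library, after which indexing
# returns jax.numpy arrays.
#
# from datasets import Dataset
# ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
# row = ds[0]  # {"x": Array([1., 2.], dtype=float32)}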
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : List[str] = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class UpperCamelCase_ ( _a ):
lowercase = """t5"""
lowercase = ["""past_key_values"""]
lowercase = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , A=32128 , A=512 , A=64 , A=2048 , A=6 , A=None , A=8 , A=32 , A=128 , A=0.1 , A=1e-6 , A=1.0 , A="relu" , A=True , A=True , A=0 , A=1 , **A , ) -> List[str]:
UpperCAmelCase : Optional[int] = vocab_size
UpperCAmelCase : str = d_model
UpperCAmelCase : Optional[Any] = d_kv
UpperCAmelCase : List[Any] = d_ff
UpperCAmelCase : List[str] = num_layers
UpperCAmelCase : Optional[int] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCAmelCase : str = num_heads
UpperCAmelCase : Union[str, Any] = relative_attention_num_buckets
UpperCAmelCase : str = relative_attention_max_distance
UpperCAmelCase : Union[str, Any] = dropout_rate
UpperCAmelCase : List[Any] = layer_norm_epsilon
UpperCAmelCase : Tuple = initializer_factor
UpperCAmelCase : List[Any] = feed_forward_proj
UpperCAmelCase : Tuple = use_cache
        act_info = self.feed_forward_proj.split("""-""" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == """gated"""
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'""" )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = """gelu_new"""
super().__init__(
pad_token_id=snake_case_ , eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , **snake_case_ , )
class UpperCamelCase_ ( _a ):
@property
def _lowercase( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            """input_ids""": {0: """batch""", 1: """encoder_sequence"""},
            """attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
        }
        if self.use_past:
            common_inputs["""attention_mask"""][1] = """past_encoder_sequence + sequence"""
            common_inputs["""decoder_input_ids"""] = {0: """batch"""}
            common_inputs["""decoder_attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        else:
            common_inputs["""decoder_input_ids"""] = {0: """batch""", 1: """decoder_sequence"""}
            common_inputs["""decoder_attention_mask"""] = {0: """batch""", 1: """decoder_sequence"""}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
        return common_inputs
@property
def _lowercase( self ) -> int:
return 13
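# Hedged sketch: a standalone copy of the `feed_forward_proj` parsing performed
# in __init__ above, so the gated-activation branch can be checked in
# isolation. The helper name is an illustrative addition, not part of the file.
def _parse_feed_forward_proj(feed_forward_proj):
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
        raise ValueError(f"`feed_forward_proj`: {feed_forward_proj} is not valid.")
    if feed_forward_proj == "gated-gelu":
        dense_act_fn = "gelu_new"  # backwards-compatibility remap, as above
    return dense_act_fn, is_gated_act
assert _parse_feed_forward_proj("gated-gelu") == ("gelu_new", True)
assert _parse_feed_forward_proj("relu") == ("relu", False)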
| 720 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
a : int = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def __lowerCamelCase ( ) -> Dict:
UpperCAmelCase : str = Github(os.environ["""GITHUB_TOKEN"""] )
UpperCAmelCase : Dict = g.get_repo("""huggingface/transformers""" )
UpperCAmelCase : int = repo.get_issues(state="""open""" )
for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 672 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class UpperCamelCase_ ( SCREAMING_SNAKE_CASE__ ):
pass
def gen(shards ):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ["""RANK"""] )
    world_size = int(os.environ["""WORLD_SIZE"""] )
    parser = ArgumentParser()
    parser.add_argument("""--streaming""" , type=bool )
    parser.add_argument("""--local_rank""" , type=int )
    parser.add_argument("""--num_workers""" , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"""shards""": [f'''shard_{shard_idx}''' for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''' )
if __name__ == "__main__":
main()
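# Hedged sketch: the per-rank size arithmetic checked above. Ranks with index
# below `full_size % world_size` receive one extra example, so all examples are
# covered exactly once.
def expected_sizes(full_size, world_size):
    return [full_size // world_size + int(rank < full_size % world_size) for rank in range(world_size)]
assert expected_sizes(NUM_SHARDS * NUM_ITEMS_PER_SHARD, 3) == [4, 4, 4]
assert expected_sizes(12, 5) == [3, 3, 2, 2, 2]
assert sum(expected_sizes(12, 5)) == 12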
| 721 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Any:
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : str = use_input_mask
UpperCAmelCase : Optional[int] = use_token_type_ids
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : str = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : str = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Optional[Any] = type_sequence_label_size
UpperCAmelCase : str = initializer_range
UpperCAmelCase : List[Any] = num_labels
UpperCAmelCase : Dict = num_choices
UpperCAmelCase : Tuple = scope
def _lowercase( self ) -> Dict:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase( self ) -> Dict:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def _lowercase( self , A , A , A , A , A , A , A ) -> str:
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A )
UpperCAmelCase : Optional[int] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> List[Any]:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
UpperCAmelCase : str = model(
A , attention_mask=A , encoder_hidden_states=A , )
UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Dict = True
UpperCAmelCase : Tuple = True
UpperCAmelCase : str = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
UpperCAmelCase : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
UpperCAmelCase : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )["""hidden_states"""][0]
UpperCAmelCase : Optional[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def _lowercase( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
lowercase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = OpenLlamaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : int = type
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["""input_ids"""]
UpperCAmelCase : str = input_ids.ne(1 ).to(A )
UpperCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> Union[str, Any]:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """single_label_classification"""
        input_ids = input_dict["""input_ids"""]
UpperCAmelCase : Optional[Any] = input_ids.ne(1 ).to(A )
UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> int:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """multi_label_classification"""
        input_ids = input_dict["""input_ids"""]
UpperCAmelCase : int = input_ids.ne(1 ).to(A )
UpperCAmelCase : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase : Any = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _lowercase( self ) -> Dict:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def _lowercase( self , scaling_type ) -> str:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Any = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
UpperCAmelCase : List[str] = original_model(A ).last_hidden_state
UpperCAmelCase : List[Any] = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Union[str, Any] = {"""type""": scaling_type, """factor""": 1_0.0}
UpperCAmelCase : str = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
UpperCAmelCase : List[str] = scaled_model(A ).last_hidden_state
UpperCAmelCase : Optional[int] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
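# Hedged sketch of what the scaling test above exercises: linear RoPE scaling
# divides positions by `factor` before the rotary angles are computed, so even
# short inputs differ from the unscaled model, while dynamic scaling is a no-op
# until the input exceeds the trained maximum length. Illustrative helper only.
def linear_rope_positions(positions, factor):
    return [p / factor for p in positions]
assert linear_rope_positions(range(4), 10.0) == [0.0, 0.1, 0.2, 0.3]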
| 672 | 0 |
'''simple docstring'''
import base64  # the original `baseaa`/`baaencode` identifiers appear to be digit-mangled base64/b85encode
def baseaa_encode(string: str ) -> bytes:
    return base64.b85encode(string.encode("""utf-8""" ) )
def baseaa_decode(aaencoded: bytes ) -> str:
    return base64.b85decode(aaencoded ).decode("""utf-8""" )
if __name__ == "__main__":
    test = """Hello World!"""
    encoded = baseaa_encode(test )
    print(encoded )
    decoded = baseaa_decode(encoded )
    print(decoded )
| 700 |
'''simple docstring'''
import math
def is_prime(number: int ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def next_prime(value , factor=1 , **kwargs ):
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **kwargs )
    return value
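# Quick self-checks for the helpers above (using the restored names).
if __name__ == "__main__":
    assert is_prime(2) and is_prime(13) and not is_prime(15)
    assert next_prime(14) == 17
    assert next_prime(13) == 17  # a prime input advances to the next prime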
| 672 | 0 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCamelCase_ ( lowercase__ ):
@slow
@require_torch
def _lowercase( self ) -> Tuple:
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
        tokenizer = BertTokenizer.from_pretrained("""bert-base-uncased""" )
        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128
        train_dataset = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
        val_dataset = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=True , max_length=512 )
            outputs = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=True , max_length=128 )
            batch["""input_ids"""] = inputs.input_ids
            batch["""attention_mask"""] = inputs.attention_mask
            batch["""decoder_input_ids"""] = outputs.input_ids
            batch["""labels"""] = outputs.input_ids.copy()
            batch["""labels"""] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
            ]
            batch["""decoder_attention_mask"""] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["""article""", """highlights"""] , )
        train_dataset.set_format(
            type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["""article""", """highlights"""] , )
        val_dataset.set_format(
            type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = SeqaSeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy="""steps""" , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = SeqaSeqTrainer(
            model=bertabert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
        # start training
        trainer.train()
| 701 |
'''simple docstring'''
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(""" """ , """""" )
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower() )
    return len(frequency ) == 26
def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char ) - ord("""a""" )] = True
        elif char.isupper():
            flag[ord(char ) - ord("""A""" )] = True
    return all(flag )
def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def benchmark() -> None:
    from timeit import timeit
    setup = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
    print(timeit("""is_pangram()""" , setup=setup ) )
    print(timeit("""is_pangram_faster()""" , setup=setup ) )
    print(timeit("""is_pangram_fastest()""" , setup=setup ) )
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
| 672 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path , rembert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file )
    print("""Building PyTorch model from configuration: {}""".format(str(config ) ) )
    model = RemBertModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print("""Save PyTorch model to {}""".format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
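# Example invocation (paths are illustrative placeholders, assuming this file
# is saved as convert_rembert_tf_checkpoint_to_pytorch.py):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/ckpt \
#       --rembert_config_file /path/to/rembert/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin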
| 702 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
a : Union[str, Any] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
a : int = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
a : int = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def _lowercase( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def _lowercase( self ) -> List[Any]:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def _lowercase( self , A , A , A=None , A="uniform_average" , A=True ) -> List[Any]:
UpperCAmelCase : List[Any] = mean_squared_error(
A , A , sample_weight=A , multioutput=A , squared=A )
return {"mse": mse}
| 672 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : List[Any] = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class UpperCamelCase_ ( UpperCamelCase_ ):
lowercase = 'decision_transformer'
lowercase = ['past_key_values']
lowercase = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , A=17 , A=4 , A=128 , A=4096 , A=True , A=1 , A=1024 , A=3 , A=1 , A=None , A="relu" , A=0.1 , A=0.1 , A=0.1 , A=1e-5 , A=0.0_2 , A=True , A=True , A=50256 , A=50256 , A=False , A=False , **A , ) -> List[str]:
UpperCAmelCase : List[str] = state_dim
UpperCAmelCase : List[str] = act_dim
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : List[str] = max_ep_len
UpperCAmelCase : List[str] = action_tanh
UpperCAmelCase : List[str] = vocab_size
UpperCAmelCase : List[Any] = n_positions
UpperCAmelCase : Union[str, Any] = n_layer
UpperCAmelCase : List[str] = n_head
UpperCAmelCase : Tuple = n_inner
UpperCAmelCase : str = activation_function
UpperCAmelCase : Dict = resid_pdrop
UpperCAmelCase : Any = embd_pdrop
UpperCAmelCase : Any = attn_pdrop
UpperCAmelCase : Union[str, Any] = layer_norm_epsilon
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : List[Any] = scale_attn_weights
UpperCAmelCase : Optional[int] = use_cache
UpperCAmelCase : Union[str, Any] = scale_attn_by_inverse_layer_idx
UpperCAmelCase : Dict = reorder_and_upcast_attn
UpperCAmelCase : List[str] = bos_token_id
UpperCAmelCase : Optional[int] = eos_token_id
super().__init__(bos_token_id=__A , eos_token_id=__A , **__A )
| 703 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Any = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'fnet'
def __init__( self , A=32000 , A=768 , A=12 , A=3072 , A="gelu_new" , A=0.1 , A=512 , A=4 , A=0.0_2 , A=1e-12 , A=False , A=512 , A=3 , A=1 , A=2 , **A , ) -> int:
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Dict = max_position_embeddings
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : int = num_hidden_layers
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : List[Any] = type_vocab_size
UpperCAmelCase : int = layer_norm_eps
UpperCAmelCase : Optional[Any] = use_tpu_fourier_optimizations
UpperCAmelCase : List[Any] = tpu_short_seq_length
| 672 | 0 |
from sklearn.metrics import matthews_corrcoef
import datasets
a : List[str] = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
a : Optional[int] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
a : Any = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def _lowercase( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def _lowercase( self , A , A , A=None ) -> List[Any]:
return {
"matthews_correlation": float(matthews_corrcoef(A , A , sample_weight=A ) ),
}
| 704 |
'''simple docstring'''
a : List[Any] = """Alexander Joslin"""
import operator as op
try:
    from .stack import Stack
except ImportError:
    Stack = None  # standalone fallback defined below
def dijkstras_two_stack_algorithm(equation: str ) -> int:
    operators = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
    operand_stack: "Stack[int]" = Stack()
    operator_stack: "Stack[str]" = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2 , num1 )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
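# Hedged fallback: a minimal Stack exposing exactly the push/pop/peek API used
# above, so this file also runs outside its package. Illustrative only; the
# packaged `.stack` module is preferred when its relative import succeeds.
if Stack is None:
    from typing import Generic, List, TypeVar
    T = TypeVar("T")
    class Stack(Generic[T]):
        def __init__(self) -> None:
            self._items: List[T] = []
        def push(self, item: T) -> None:
            self._items.append(item)
        def pop(self) -> T:
            return self._items.pop()
        def peek(self) -> T:
            return self._items[-1]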
if __name__ == "__main__":
a : Tuple = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 672 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
a : Optional[int] = logging.get_logger(__name__)
a : Dict = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
a : Dict = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
a : Optional[Any] = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class UpperCamelCase_ ( __SCREAMING_SNAKE_CASE ):
lowercase = 'whisper'
lowercase = ['past_key_values']
lowercase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , A=51865 , A=80 , A=6 , A=4 , A=6 , A=4 , A=1536 , A=1536 , A=0.0 , A=0.0 , A=50257 , A=True , A=True , A="gelu" , A=256 , A=0.0 , A=0.0 , A=0.0 , A=0.0_2 , A=False , A=1500 , A=448 , A=50256 , A=50256 , A=50256 , A=None , A=[220, 50256] , A=False , A=256 , A=False , A=0.0_5 , A=10 , A=2 , A=0.0 , A=10 , A=0 , A=7 , **A , ) -> Any:
UpperCAmelCase : Union[str, Any] = vocab_size
UpperCAmelCase : List[str] = num_mel_bins
UpperCAmelCase : Tuple = d_model
UpperCAmelCase : List[str] = encoder_layers
UpperCAmelCase : List[Any] = encoder_attention_heads
UpperCAmelCase : Dict = decoder_layers
UpperCAmelCase : Any = decoder_attention_heads
UpperCAmelCase : Union[str, Any] = decoder_ffn_dim
UpperCAmelCase : Optional[Any] = encoder_ffn_dim
UpperCAmelCase : Union[str, Any] = dropout
UpperCAmelCase : Tuple = attention_dropout
UpperCAmelCase : Tuple = activation_dropout
UpperCAmelCase : str = activation_function
UpperCAmelCase : Union[str, Any] = init_std
UpperCAmelCase : Optional[Any] = encoder_layerdrop
UpperCAmelCase : List[Any] = decoder_layerdrop
UpperCAmelCase : Tuple = use_cache
UpperCAmelCase : Optional[Any] = encoder_layers
UpperCAmelCase : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase : Optional[int] = max_source_positions
UpperCAmelCase : str = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase : Tuple = classifier_proj_size
UpperCAmelCase : Tuple = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase : Any = apply_spec_augment
UpperCAmelCase : Optional[int] = mask_time_prob
UpperCAmelCase : Optional[int] = mask_time_length
UpperCAmelCase : Optional[int] = mask_time_min_masks
UpperCAmelCase : Optional[Any] = mask_feature_prob
UpperCAmelCase : Optional[Any] = mask_feature_length
UpperCAmelCase : Any = mask_feature_min_masks
UpperCAmelCase : List[str] = median_filter_width
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , is_encoder_decoder=_a , decoder_start_token_id=_a , suppress_tokens=_a , begin_suppress_tokens=_a , **_a , )
class UpperCamelCase_ ( __SCREAMING_SNAKE_CASE ):
@property
def _lowercase( self ) -> Any:
        common_inputs = OrderedDict(
            [
                ("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
            ] )
        if self.use_past:
            common_inputs["""decoder_input_ids"""] = {0: """batch"""}
        else:
            common_inputs["""decoder_input_ids"""] = {0: """batch""", 1: """decoder_sequence"""}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
        return common_inputs
    def _lowercase( self , preprocessor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , sampling_rate = 22050 , time_duration = 5.0 , frequency = 220 , ) -> Mapping:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=batch_size , framework=framework , sampling_rate=sampling_rate , time_duration=time_duration , frequency=frequency , )
        encoder_sequence_length = encoder_inputs["""input_features"""].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer , batch_size , seq_length , is_pair , framework )
        dummy_inputs["""input_features"""] = encoder_inputs.pop("""input_features""" )
        dummy_inputs["""decoder_input_ids"""] = decoder_inputs.pop("""decoder_input_ids""" )
        if "past_key_values" in decoder_inputs:
            dummy_inputs["""past_key_values"""] = decoder_inputs.pop("""past_key_values""" )
        return dummy_inputs
@property
def _lowercase( self ) -> List[Any]:
return 1e-3
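# Hedged sketch: the two id lists above are non-speech tokens that Whisper
# suppresses during generation (NON_SPEECH_TOKENS / NON_SPEECH_TOKENS_MULTI in
# transformers). A minimal illustration of what suppression means for a logits
# vector; this helper is an added example, not the library implementation.
import numpy as np
def suppress_token_ids(logits, suppressed_ids):
    logits = np.array(logits, dtype=float)
    logits[list(suppressed_ids)] = -np.inf  # suppressed ids can never be selected
    return logits
masked = suppress_token_ids(np.zeros(100), [1, 2, 7])
assert masked[1] == -np.inf and masked[0] == 0.0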
| 705 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a : List[str] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict , old , new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict , is_panoptic=False ):
    prefix = """"""
    if is_panoptic:
        prefix = """conditional_detr."""
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name , pytorch_dump_folder_path ):
UpperCAmelCase : str = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
UpperCAmelCase : List[Any] = """resnet101"""
if "dc5" in model_name:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : List[Any] = """panoptic""" in model_name
if is_panoptic:
UpperCAmelCase : Union[str, Any] = 2_5_0
else:
UpperCAmelCase : int = 9_1
UpperCAmelCase : Tuple = """huggingface/label-files"""
UpperCAmelCase : List[Any] = """coco-detection-id2label.json"""
UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase : Dict = {int(_lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase : Optional[Any] = idalabel
UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
# load image processor
UpperCAmelCase : List[str] = """coco_panoptic""" if is_panoptic else """coco_detection"""
UpperCAmelCase : List[Any] = ConditionalDetrImageProcessor(format=_lowercase )
# prepare image
UpperCAmelCase : Union[str, Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=_lowercase , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = encoding["""pixel_values"""]
logger.info(F'''Converting model {model_name}...''' )
# load original model from torch hub
UpperCAmelCase : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , _lowercase , pretrained=_lowercase ).eval()
UpperCAmelCase : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
UpperCAmelCase : List[Any] = """conditional_detr.""" + src
rename_key(_lowercase , _lowercase , _lowercase )
UpperCAmelCase : List[Any] = rename_backbone_keys(_lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowercase , is_panoptic=_lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase : int = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
UpperCAmelCase : Union[str, Any] = state_dict.pop(_lowercase )
UpperCAmelCase : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCAmelCase : Any = state_dict.pop(_lowercase )
UpperCAmelCase : Optional[Any] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
UpperCAmelCase : List[Any] = state_dict.pop(_lowercase )
UpperCAmelCase : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
UpperCAmelCase : Optional[int] = state_dict.pop(_lowercase )
UpperCAmelCase : Union[str, Any] = val
# finally, create HuggingFace model and load state dict
UpperCAmelCase : List[Any] = ConditionalDetrForSegmentation(_lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
model.push_to_hub(repo_id=_lowercase , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
UpperCAmelCase : Union[str, Any] = conditional_detr(_lowercase )
UpperCAmelCase : int = model(_lowercase )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 )
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
a : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 672 | 0 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """De-duplicates entries in a model section of the table of contents and sorts them by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Clean each modality sub-section of the model doc
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
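
    # This check is normally wired into the repo's style tooling; a direct invocation would look like
    # (the path is an assumption for illustration): python utils/check_doc_toc.py --fix_and_overwrite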
| 706 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 672 | 0 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that mimics @property but caches output in a member variable."""

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
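

# Minimal usage sketch (illustrative only, not part of the original module):
#
#   class Model:
#       @cached_property
#       def weights(self):
#           return expensive_load()  # computed on first access, then cached on the instance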
def strtobool(val):
    """Convert a string representation of truth to `1` (true) or `0` (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """Tests if `x` is a `torch.Tensor`, `tf.Tensor`, jax tensor or `np.ndarray`."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor (i.e. not eager). Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
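

# For instance (assuming torch is installed):
#   to_py_obj({"a": torch.tensor([1, 2])})  ->  {"a": [1, 2]}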
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) that will ignore the `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """
        Convert self to a tuple containing all the attributes/keys that are not `None`.
        """
        return tuple(self[k] for k in self.keys())
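

# Illustrative subclass (hypothetical; mirrors how output dataclasses are declared downstream):
#
#   @dataclass
#   class SampleOutput(ModelOutput):
#       loss: Any = None
#       logits: Any = None
#
#   out = SampleOutput(logits=[0.1, 0.9])
#   out["logits"], out.logits and out[0] then all refer to the same value,
#   while the `loss` key is simply absent because it is None.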
class ExplicitEnum(str, Enum):
    """
    Enum with more explicit error message for missing values.
    """

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """
    Possible values for the `padding` argument of tokenizers. Useful for tab-completion in an IDE.
    """

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """
    Possible values for the `return_tensors` argument of tokenizers. Useful for tab-completion in an IDE.
    """

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """
    Wrapper for `contextlib.ExitStack` which enters a collection of context managers.
    """

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Check if a given model can return loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the labels used by a given model."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d, parent_key="", delimiter="."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
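

# e.g. flatten_dict({"a": {"b": 1}, "c": 2})  ->  {"a.b": 1, "c": 2}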
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir=False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` for numpy/torch/TensorFlow/Jax arrays."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape`."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze`."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims`."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size`."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
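

# e.g. infer_framework(BertModel) -> "pt" and infer_framework(TFBertModel) -> "tf",
# because their base classes are PreTrainedModel and TFPreTrainedModel respectively.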
| 707 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
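

# Typical use (requires network access; the model id is copied from the maps above):
#   tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#   tokenizer("Le camembert est délicieux.")["input_ids"]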
| 672 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 708 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    """
    Solve an ODE with Euler's modified method (Heun's method): take a plain Euler
    predictor step, then correct with the trapezoidal average of the slopes at
    both ends of the interval.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor: plain Euler step
        y_get = y[k] + step_size * ode_func(x, y[k])
        # corrector: average the slopes at x and x + step_size
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_get))
        )
        x += step_size

    return y
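

# Quick sanity check (illustrative, not part of the original file): solving y' = y from
# x = 0 to 1 with step 0.1 should land near e = 2.71828...
#
#   y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
#   print(y[-1])  # ~2.714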
if __name__ == "__main__":
import doctest
doctest.testmod()
| 672 | 0 |
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # Build a key mapping from HF Diffusers names to Stable Diffusion names, then relabel the state dict.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
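

# e.g. the Diffusers key "down_blocks.0.resnets.0.norm1.weight" is relabelled to the SD key
# "input_blocks.1.0.in_layers.0.weight" by the resnet- and layer-level replacements above.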
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights of shape (out, in) to SD conv weights of shape (out, in, 1, 1)
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            # single character 'q', 'k' or 'v' identifying the projection
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
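

# Note: the separate q/k/v projection matrices from the HF CLIP text encoder are fused above into
# the single in_proj tensor that the original OpenCLIP attention blocks expect (row order: q, k, v).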
def convert_text_enc_state_dict(text_enc_dict):
    # v1 text encoder keys pass through unchanged
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
    args = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 709 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each possible label into an NLI
    premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.

                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
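

# Illustrative usage via the pipeline factory (the model id is just an example):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier("I love this movie", candidate_labels=["positive", "negative"])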
| 672 | 0 |
'''simple docstring'''
import base64


def base64_encode(string: str) -> bytes:
    """Encodes the given UTF-8 string to Base64 bytes."""
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded_bytes: bytes) -> str:
    """Decodes the given Base64 bytes back to a UTF-8 string."""
    return base64.b64decode(encoded_bytes).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)

    decoded = base64_decode(encoded)
    print(decoded)
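
    # Expected output of the demo above:
    #   b'SGVsbG8gV29ybGQh'
    #   Hello World!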
| 710 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase : List[str] = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase,  # the large expected-encoding dict defined above
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )

        token_ids = self._large_tokenizer(test_str).input_ids

        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 672 | 0 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
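

# For example, T5Config(feed_forward_proj="gated-gelu") ends up with is_gated_act == True and
# dense_act_fn == "gelu_new" via the backwards-compatibility branch above.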
| 711 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ) -> None:
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ) -> None:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ) -> None:
        return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def _lowercase( self ) -> Dict:
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def _lowercase( self ) -> Any:
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def _lowercase( self ) -> int:
pass
    def test_training( self ) -> None:
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            # MODEL_MAPPING / MODEL_FOR_BACKBONE_MAPPING come from the transformers imports
            # at the top of the original test file (not visible in this fragment)
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING ),
            ]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ) -> None:
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING ), *get_values(MODEL_FOR_BACKBONE_MAPPING )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_forward_signature( self ) -> None:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ) -> None:
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> None:
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ( ) -> Optional[int]:
UpperCAmelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ) -> str:
        return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ) -> None:
        model = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
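# Note: the (1, 1000) logits shape corresponds to the ImageNet-1k label space of
# the facebook/convnextv2-tiny-1k-224 checkpoint; pinning only a three-logit
# slice with atol=1e-4 keeps the test robust to minor kernel-level numerical
# differences while still catching real regressions.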
| 672 | 0 |
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size ( features ) -> Optional[int]:
    batch_size = np.inf
    def set_batch_size(feature ) -> None:
        nonlocal batch_size
        if isinstance(feature , Image ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature , Audio ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature , Value ) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
    _visit(features , set_batch_size )
    return None if batch_size is np.inf else batch_size
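# Illustration (values are hypothetical, not executed here): a dataset with an
# Image column gets its writer batch size capped at the image-specific
# row-group limit, so large binary payloads do not create oversized row groups.
#
#   features = Features({"image": Image(), "label": Value("int64")})
#   get_writer_batch_size(features)  # -> config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS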
class ParquetDatasetReader ( AbstractDatasetReader ):
    def __init__( self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ) -> None:
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )
    def read( self ) -> Dict:
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter :
    def __init__( self , dataset , path_or_buf , batch_size = None , **parquet_writer_kwargs , ) -> None:
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write( self ) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , """wb+""" ) as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
        return written
    def _write( self , file_obj , batch_size , **parquet_writer_kwargs ) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop("""path_or_buf""" , None )
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating parquet from Arrow format""" , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch )
            written += batch.nbytes
        writer.close()
        return written
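# Usage sketch (illustrative; mirrors how the upstream `datasets` library wires
# these classes into Dataset.to_parquet / Dataset.from_parquet):
#
#   ds = Dataset.from_dict({"x": [1, 2, 3]})
#   n_bytes = ParquetDatasetWriter(ds, "data.parquet").write()
#   ds_back = ParquetDatasetReader("data.parquet").read()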
| 712 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_CITATION = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
    def _info( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
    def _compute( self , predictions , references , return_pvalue=False ) -> dict:
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
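# Sanity check (illustrative): the metric should agree with calling scipy
# directly, matching the docstring example above.
#
#   preds, refs = [10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5]
#   round(pearsonr(refs, preds)[0], 2)  # -0.74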
| 672 | 0 |
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size ( features ) -> Optional[int]:
    batch_size = np.inf
    def set_batch_size(feature ) -> None:
        nonlocal batch_size
        if isinstance(feature , Image ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature , Audio ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature , Value ) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
    _visit(features , set_batch_size )
    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader ( AbstractDatasetReader ):
    def __init__( self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ) -> None:
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["""parquet"""][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )
    def read( self ) -> Tuple:
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter :
    def __init__( self , dataset , path_or_buf , batch_size = None , **parquet_writer_kwargs , ) -> None:
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write( self ) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , """wb+""" ) as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
        return written
    def _write( self , file_obj , batch_size , **parquet_writer_kwargs ) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop("""path_or_buf""" , None )
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating parquet from Arrow format""" , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch )
            written += batch.nbytes
        writer.close()
        return written
| 713 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string ( stringa , stringb ) -> str | Literal[False]:
    lista = list(stringa )
    listb = list(stringb )
    count = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = """_"""
    if count > 1:
        return False
    else:
        return "".join(lista )
def check ( binary ) -> list[str]:
    pi = []
    while True:
        # "$" marks terms that merged with nothing this round: they are prime implicants
        checka = ["""$"""] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1 , len(binary ) ):
                k = compare_string(binary[i] , binary[j] )
                if k is not False:  # the two terms differ in exactly one bit and can be merged
                    checka[i] = """*"""
                    checka[j] = """*"""
                    temp.append(k )
        for i in range(len(binary ) ):
            if checka[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )
def decimal_to_binary ( no_of_variable , minterms ) -> list[str]:
    temp = []
    for minterm in minterms:
        string = """"""
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp
def is_for_table ( stringa , stringb , count ) -> bool:
    lista = list(stringa )
    listb = list(stringb )
    count_n = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count
def selection ( chart , prime_implicants ) -> list[str]:
    temp = []
    select = [0] * len(chart )
    # first pick implicants that are the only cover of some minterm (essential ones)
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    # then greedily pick the implicant covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0
def prime_implicant_chart ( prime_implicants , binary ) -> list[list[int]]:
    chart = [[0 for _ in range(len(binary ) )] for _ in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count("""_""" )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] , binary[j] , count ):
                chart[i][j] = 1
    return chart
def main ( ) -> None:
    no_of_variable = int(input("""Enter the no. of variables\n""" ) )
    minterms = [
        int(x )  # minterms must be integers for the binary conversion above
        for x in input(
            """Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
    ]
    binary = decimal_to_binary(no_of_variable , minterms )
    prime_implicants = check(binary )
    print("""Prime Implicants are:""" )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants , binary )
    essential_prime_implicants = selection(chart , prime_implicants )
    print("""Essential Prime Implicants are:""" )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
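# Worked example (hand-checked, illustrative): for a 3-variable function with
# minterms {0, 1, 2, 5}:
#   decimal_to_binary(3, [0, 1, 2, 5])   ->  ['000', '001', '010', '101']
#   check(['000', '001', '010', '101'])  ->  prime implicants {'00_', '0_0', '_01'}
# ('00_' covers minterms 0 and 1, '0_0' covers 0 and 2, '_01' covers 1 and 5;
# the exact list order can vary because `check` deduplicates through a set).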
| 672 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : List[str] = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config ( PretrainedConfig ):
    model_type = 'mobilenet_v1'
    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , min_depth=8 , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.9_9_9 , initializer_range=0.0_2 , layer_norm_eps=0.0_0_1 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("""depth_multiplier must be greater than zero.""" )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("""pixel_values""", {0: """batch"""})] )
    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("""logits""", {0: """batch"""})] )
        else:
            return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
    @property
    def atol_for_validation( self ) -> float:
        return 1e-4
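# Usage sketch (standard transformers config pattern):
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   config.hidden_act   # "relu6"
#   config.min_depth    # 8, the floor applied after depth scaling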
| 714 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def next_number ( number ) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
        number //= 1_0_0_0_0_0
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 1_0_0_0_0_0_0_0
CHAINS[0] = True  # the chain starting at 1 ends in 1
CHAINS[57] = False  # the chain starting at 58 ends in 89
def chain ( number ) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 1_0_0_0_0_0_0_0:
        CHAINS[number - 1] = number_chain
        number *= 1_0
    return number_chain
def solution ( number = 1_0_0_0_0_0_0_0 ) -> int:
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
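# Example (illustrative): next_number consumes the input in 5-digit blocks via
# the precomputed table, e.g. for 1234567 the two lookups are
# DIGITS_SQUARED[34567] and DIGITS_SQUARED[12], which gives the same result as
# summing squared digits directly:
#   sum(int(d) ** 2 for d in "1234567") == next_number(1234567)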
| 672 | 0 |
import math
from datetime import datetime, timedelta
def gauss_easter ( year ) -> datetime:
    metonic_cycle = year % 1_9
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 1_0_0 )
    lunar_orbit_correction = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_9 )
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_8 )
    else:
        return datetime(year , 3 , 2_2 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
a : List[Any] = """will be""" if year > datetime.now().year else """was"""
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
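# Spot check (well-known calendar dates): Western Easter fell on April 23, 2000
# and April 9, 2023, so gauss_easter(2000) == datetime(2000, 4, 23) and
# gauss_easter(2023) == datetime(2023, 4, 9) should hold.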
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_bert_fast"""] = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_bert"""] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_bert"""] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_bert_tf"""] = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_bert"""] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
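# Lazy-import note: with the _LazyModule installed in sys.modules, a bare
# `import transformers.models.bert` stays cheap; the torch/TF/Flax submodules
# listed in _import_structure are only imported on first attribute access,
# e.g. the first time `BertModel` is actually referenced.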
| 672 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/mbart-large-en-ro""": 1_0_2_4,
"""facebook/mbart-large-cc25""": 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class MBartTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , tokenizer_file=None , src_lang=None , tgt_lang=None , sp_model_kwargs = None , additional_special_tokens=None , **kwargs , ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , tokenizer_file=tokenizer_file , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> int:
UpperCAmelCase : Dict = self.__dict__.copy()
UpperCAmelCase : Tuple = None
UpperCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A ) -> List[Any]:
UpperCAmelCase : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase : Optional[int] = {}
UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def vocab_size( self ) -> int:
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang( self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
def _lowercase( self , A , A = None , A = False ) -> Any:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
UpperCAmelCase : int = [1] * len(self.prefix_tokens )
UpperCAmelCase : List[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A_ )) + suffix_ones
return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _lowercase( self , A , A = None ) -> Optional[Any]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowercase( self , A , A = None ) -> List[Any]:
UpperCAmelCase : Dict = [self.sep_token_id]
UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase( self , A , A , A , A , **A ) -> Union[str, Any]:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
UpperCAmelCase : Tuple = src_lang
UpperCAmelCase : Tuple = self(A_ , add_special_tokens=A_ , return_tensors=A_ , **A_ )
UpperCAmelCase : Optional[int] = self.convert_tokens_to_ids(A_ )
UpperCAmelCase : Dict = tgt_lang_id
return inputs
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Tuple = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowercase( self , A ) -> int:
return self.sp_model.encode(A_ , out_type=A_ )
def _lowercase( self , A ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase : Optional[Any] = self.sp_model.PieceToId(A_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _lowercase( self , A ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _lowercase( self , A ) -> Union[str, Any]:
UpperCAmelCase : Dict = "".join(A_ ).replace(A_ , """ """ ).strip()
return out_string
def _lowercase( self , A , A = None ) -> str:
if not os.path.isdir(A_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : List[str] = os.path.join(
A_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , """wb""" ) as fi:
UpperCAmelCase : str = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
def _lowercase( self , A , A = "en_XX" , A = None , A = "ro_RO" , **A , ) -> Union[str, Any]:
UpperCAmelCase : str = src_lang
UpperCAmelCase : Dict = tgt_lang
return super().prepare_seqaseq_batch(A_ , A_ , **A_ )
def _lowercase( self ) -> List[str]:
return self.set_src_lang_special_tokens(self.src_lang )
def _lowercase( self ) -> Any:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
    def set_tgt_lang_special_tokens( self , lang ) -> None:
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
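# Usage sketch (mirrors the documented MBart pattern; per the two methods above,
# sequences are laid out as [tokens] </s> lang_code, with the suffix switched to
# the target code when preparing decoder inputs):
#   tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")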
| 716 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing ( search_prob , find_max = True , max_x = math.inf , min_x = -math.inf , max_y = math.inf , min_y = -math.inf , visualization = False , start_temperate = 1_0_0 , rate_of_decrease = 0.01 , threshold_temp = 1 , ) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
        plt.xlabel("""Iterations""" )
        plt.ylabel("""Function values""" )
        plt.show()
    return best_state
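# The acceptance rule above is the Metropolis criterion: a worsening move with
# score change `change` (negative after the find_max sign flip) is kept with
# probability e ** (change / current_temp). For example, at T = 100 a score
# drop of 10 survives with e ** -0.1 ≈ 0.905, while at T = 1 the same drop
# survives with e ** -10 ≈ 4.5e-5, so the walk gradually becomes greedy.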
if __name__ == "__main__":
    def test_f1 ( x , y ) -> float:
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
    )
    print(
        """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
    )
    print(
        """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F'''and 50 > y > - 5 found via simulated annealing: {local_max.score()}'''
    )

    def test_f2 ( x , y ) -> float:
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        """The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        F'''{local_min.score()}'''
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        """The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        F'''{local_max.score()}'''
    )
| 672 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a : Tuple = logging.get_logger(__name__)
def create_rename_keys ( config , has_lm_head=False , is_semantic=False ) -> Optional[Any]:
    prefix = """backbone.""" if is_semantic else """"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(F'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(F'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(F'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
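# For example, with a non-semantic DiT checkpoint the first generated pair maps
#   'blocks.0.norm1.weight' -> 'beit.encoder.layer.0.layernorm_before.weight'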
def read_in_q_k_v ( state_dict , config , has_lm_head=False , is_semantic=False ) -> Optional[int]:
    for i in range(config.num_hidden_layers ):
        prefix = """backbone.""" if is_semantic else """"""
        # queries, keys and values
        in_proj_weight = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' )
        q_bias = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' )
        state_dict[F'''beit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''beit.encoder.layer.{i}.attention.attention.query.bias'''] = q_bias
        state_dict[F'''beit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''beit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''beit.encoder.layer.{i}.attention.attention.value.bias'''] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_a = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' )
        gamma_b = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' )
        state_dict[F'''beit.encoder.layer.{i}.lambda_1'''] = gamma_a
        state_dict[F'''beit.encoder.layer.{i}.lambda_2'''] = gamma_b
def rename_key ( dct , old , new ) -> None:
    val = dct.pop(old )
    dct[new] = val
def prepare_img ( ) -> "Image.Image":
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dit_checkpoint ( checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ) -> None:
    has_lm_head = False if """rvlcdip""" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True , use_mask_token=has_lm_head )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1_0_2_4
        config.intermediate_size = 4_0_9_6
        config.num_hidden_layers = 2_4
        config.num_attention_heads = 1_6
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 1_6
        repo_id = """huggingface/label-files"""
        filename = """rvlcdip-id2label.json"""
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )["""model"""]
    rename_keys = create_rename_keys(config , has_lm_head=has_lm_head )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , has_lm_head=has_lm_head )
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config ) if has_lm_head else BeitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image , return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values )
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 1_6] if """rvlcdip""" in checkpoint_url else [1, 1_9_6, 8_1_9_2]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        if has_lm_head:
            model_name = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
        else:
            model_name = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=True , )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
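# Example invocation (the script filename and output path are placeholders):
#   python convert_dit_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base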
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_nezha"""] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 0 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url ( repo_id , path , revision = None ) -> str:
    if version.parse(hfh.__version__ ).release < version.parse("""0.11.0""" ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type="""dataset""" , revision=revision )
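# Example (illustrative repo and file names):
#   hf_hub_url("squad", "plain_text/train.parquet")
#   -> "https://huggingface.co/datasets/squad/resolve/main/plain_text/train.parquet"
# (huggingface_hub substitutes its default branch when revision is None).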
| 718 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a : Tuple = False
class UpperCamelCase_ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
    def test_inference_image_variations( self ) -> None:
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        image_prompt = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 672 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase_ ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self ) -> dict:
        torch.manual_seed(0 )
        unet = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        scheduler = DDIMScheduler()
        components = {"""unet""": unet, """scheduler""": scheduler}
        return components
    def get_dummy_inputs( self , device , seed=0 ) -> dict:
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """batch_size""": 1,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
    def test_inference( self ) -> None:
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3) )
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
    def test_dict_tuple_outputs_equivalent( self ) -> None:
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def test_save_load_local( self ) -> None:
        super().test_save_load_local(expected_max_difference=3e-3 )
    def test_save_load_optional_components( self ) -> None:
        super().test_save_load_optional_components(expected_max_difference=3e-3 )
    def test_inference_batch_single_identical( self ) -> None:
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
    def test_inference_cifar10( self ) -> None:
        model_id = """google/ddpm-cifar10-32"""
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddim.to(torch_device )
        ddim.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddim(generator=generator , eta=0.0 , output_type="""numpy""" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_ema_bedroom( self ) -> None:
        model_id = """google/ddpm-ema-bedroom-256"""
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = DDIMScheduler.from_pretrained(model_id )
        ddpm = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddpm.to(torch_device )
        ddpm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddpm(generator=generator , output_type="""numpy""" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
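# Hedged standalone sketch of what the fast test above exercises: composing a
# DDIMPipeline from a freshly initialised UNet and scheduler. The shapes mirror
# the dummy components; this is a random, untrained model, purely illustrative.
if __name__ == "__main__":
    demo_unet = UNet2DModel(
        block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
    demo_pipe = DDIMPipeline(unet=demo_unet , scheduler=DDIMScheduler() )
    demo_image = demo_pipe(batch_size=1 , num_inference_steps=2 , output_type="""numpy""" ).images
    print(demo_image.shape )  # (1, 32, 32, 3)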
| 719 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING : Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, 'jax.Array', Mapping] ):
    def __init__( self , features=None , device=None , **jnp_array_kwargs ) -> None:
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device , Device ):
            raise ValueError(
                f'''Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` '''
                """is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
                """the device with `str()` to get its string identifier that will be internally mapped """
                """to the actual `jaxlib.xla_extension.Device`.""" )
        self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                f'''Device with string identifier {self.device} not listed among the available '''
                f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
                f'''device: {str(jax.devices()[0] )}.''' )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax
        return {str(device ): device for device in jax.devices()}
    def _consolidate( self , column ) -> "jax.Array":
        import jax
        import jax.numpy as jnp
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column , axis=0 )
        return column
    def _tensorize( self , value ) -> "jax.Array":
        import jax
        import jax.numpy as jnp
        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"""dtype""": jnp.int64}
            else:
                default_dtype = {"""dtype""": jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"""dtype""": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
    def _recursive_tensorize( self , data_struct ) -> "jax.Array":
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct , """__array__""" ) and not isinstance(data_struct , jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self , data_struct ) -> Dict:
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row( self , pa_table ) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self , pa_table ) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self , pa_table ) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
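# A hedged usage sketch: a formatter like the one above is what backs
# `with_format("jax")` in `datasets`; the tiny in-memory dataset is illustrative.
if __name__ == "__main__":
    from datasets import Dataset
    ds = Dataset.from_dict({"""x""": [[1, 2], [3, 4]]} ).with_format("""jax""" )
    print(type(ds[0]["""x"""] ) )  # a jax.Array produced by the formatter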
| 672 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="""Translation""" , init=False , repr=False )
    def __call__( self ) -> "pa.StructType":
        return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
    def flatten( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value
        return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="""TranslationVariableLanguages""" , init=False , repr=False )
    def __post_init__( self ) -> None:
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None
    def __call__( self ) -> "pa.StructType":
        return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
    def encode_example( self , translation_dict ) -> Any:
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                f'''Some languages in example ({', '.join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({', '.join(lang_set )}).''' )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}
    def flatten( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value
        return {
            "language": Sequence(Value("""string""" ) ),
            "translation": Sequence(Value("""string""" ) ),
        }
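# Quick illustration of the normalization performed by
# TranslationVariableLanguages.encode_example on a mixed example; the language
# codes and strings are made up for the demo.
if __name__ == "__main__":
    feature = TranslationVariableLanguages(languages=["""de""", """en""", """fr"""] )
    print(feature.encode_example({"""en""": """the cat""", """fr""": ["""le chat""", """la chatte"""]} ) )
    # -> {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}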
| 720 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def main( ) -> None:
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/transformers""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 672 | 0 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("""s3fs""") is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS : List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri( dataset_path ) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("""://""" )[1]
    return dataset_path
def is_remote_filesystem( fs ) -> bool:
    return fs is not None and fs.protocol != "file"
def rename( fs , src , dst ) -> None:
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock( ) -> None:
    if hasattr(fsspec.asyn , """reset_lock""" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        # clear the cached event loop, its thread and the module-level lock
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
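# Small sanity-check of the URI helpers above (pure string logic, no network):
if __name__ == "__main__":
    assert extract_path_from_uri("""s3://my-bucket/datasets/squad""" ) == """my-bucket/datasets/squad"""
    assert extract_path_from_uri("""/local/path/squad""" ) == """/local/path/squad"""
    assert is_remote_filesystem(None ) is False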
| 721 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Any:
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : str = use_input_mask
UpperCAmelCase : Optional[int] = use_token_type_ids
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : str = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : str = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Optional[Any] = type_sequence_label_size
UpperCAmelCase : str = initializer_range
UpperCAmelCase : List[Any] = num_labels
UpperCAmelCase : Dict = num_choices
UpperCAmelCase : Tuple = scope
def _lowercase( self ) -> Dict:
UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase( self ) -> Dict:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def _lowercase( self , A , A , A , A , A , A , A ) -> str:
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A )
UpperCAmelCase : Optional[int] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> List[Any]:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
UpperCAmelCase : str = model(
A , attention_mask=A , encoder_hidden_states=A , )
UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Dict = True
UpperCAmelCase : Tuple = True
UpperCAmelCase : str = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
UpperCAmelCase : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
UpperCAmelCase : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )["""hidden_states"""][0]
UpperCAmelCase : Optional[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
lowercase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = OpenLlamaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : int = type
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[str] = 3
UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""]
UpperCAmelCase : str = input_ids.ne(1 ).to(A )
UpperCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = 3
UpperCAmelCase : Any = """single_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : Optional[Any] = input_ids.ne(1 ).to(A )
UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> int:
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = 3
UpperCAmelCase : Optional[Any] = """multi_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : int = input_ids.ne(1 ).to(A )
UpperCAmelCase : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase : Any = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _lowercase( self ) -> Dict:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowercase( self , A ) -> str:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Any = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
UpperCAmelCase : List[str] = original_model(A ).last_hidden_state
UpperCAmelCase : List[Any] = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Union[str, Any] = {"""type""": scaling_type, """factor""": 1_0.0}
UpperCAmelCase : str = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
UpperCAmelCase : List[str] = scaled_model(A ).last_hidden_state
UpperCAmelCase : Optional[int] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
| 672 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase_ ( __a , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
@property
    def gpu_provider( self ) -> tuple:
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options( self ) -> "ort.SessionOptions":
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inpainting( self ) -> None:
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A red cat sitting on a park bench"""
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="""np""" , )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_inpainting_k_lms( self ) -> None:
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            """runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A red cat sitting on a park bench"""
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="""np""" , )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 700 |
'''simple docstring'''
import math
def is_prime( number ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def next_prime( value , factor=1 , **kwargs ):
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **kwargs )
    return value
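# Behavior notes, as a tiny demo: next_prime searches upward from factor*value,
# and if the starting value was already prime it recurses to return the *next* one.
if __name__ == "__main__":
    assert is_prime(13 ) and not is_prime(15 )
    assert next_prime(10 ) == 11  # 10 is composite, scan up to 11
    assert next_prime(13 ) == 17  # 13 is prime, so return the following prime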
| 672 | 0 |
'''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class UpperCamelCase_ :
def __init__( self , A ) -> Tuple:
if isinstance(A , A ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
UpperCAmelCase : int = deepcopy(A )
elif os.path.exists(A ):
with io.open(A , """r""" , encoding="""utf-8""" ) as f:
UpperCAmelCase : int = json.load(A )
else:
try:
UpperCAmelCase : int = baseaa.urlsafe_baadecode(A ).decode("""utf-8""" )
UpperCAmelCase : List[Any] = json.loads(A )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
UpperCAmelCase : List[str] = config
self.set_stage_and_offload()
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[int] = self.get_value("""zero_optimization.stage""" , -1 )
# offload
UpperCAmelCase : Union[str, Any] = False
if self.is_zeroa() or self.is_zeroa():
UpperCAmelCase : Union[str, Any] = set(["""cpu""", """nvme"""] )
UpperCAmelCase : Any = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
UpperCAmelCase : Union[str, Any] = True
def _lowercase( self , A ) -> List[str]:
UpperCAmelCase : str = self.config
# find the config node of interest if it exists
UpperCAmelCase : Any = ds_key_long.split(""".""" )
UpperCAmelCase : List[Any] = nodes.pop()
for node in nodes:
UpperCAmelCase : int = config.get(A )
if config is None:
return None, ds_key
return config, ds_key
def _lowercase( self , A , A=None ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.find_config_node(A )
if config is None:
return default
return config.get(A , A )
def _lowercase( self , A , A=False ) -> int:
UpperCAmelCase : List[str] = self.config
# find the config node of interest if it exists
UpperCAmelCase : Tuple = ds_key_long.split(""".""" )
for node in nodes:
UpperCAmelCase : Optional[int] = config
UpperCAmelCase : Optional[Any] = config.get(A )
if config is None:
if must_exist:
raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(A )
def _lowercase( self , A ) -> List[str]:
UpperCAmelCase : Dict = self.get_value(A )
return False if value is None else bool(A )
def _lowercase( self , A ) -> List[Any]:
UpperCAmelCase : Optional[int] = self.get_value(A )
return False if value is None else not bool(A )
def _lowercase( self ) -> List[str]:
return self._stage == 2
def _lowercase( self ) -> Any:
return self._stage == 3
def _lowercase( self ) -> List[Any]:
return self._offload
class UpperCamelCase_ :
def __init__( self , A ) -> str:
UpperCAmelCase : List[str] = engine
def _lowercase( self , A , **A ) -> List[str]:
self.engine.backward(A , **A )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A ) -> Any:
super().__init__(A , device_placement=A , scaler=A )
UpperCAmelCase : Any = hasattr(self.optimizer , """overflow""" )
def _lowercase( self , A=None ) -> str:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def _lowercase( self ) -> Any:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def _lowercase( self ) -> str:
if self.__has_overflow__:
return self.optimizer.overflow
return False
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A , A ) -> Tuple:
super().__init__(A , A )
def _lowercase( self ) -> str:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class UpperCamelCase_ :
def __init__( self , A , A=0.0_0_1 , A=0 , **A ) -> Union[str, Any]:
UpperCAmelCase : Any = params
UpperCAmelCase : int = lr
UpperCAmelCase : Tuple = weight_decay
UpperCAmelCase : Dict = kwargs
class UpperCamelCase_ :
def __init__( self , A , A=None , A=0 , **A ) -> Optional[Any]:
UpperCAmelCase : Tuple = optimizer
UpperCAmelCase : Optional[int] = total_num_steps
UpperCAmelCase : Tuple = warmup_num_steps
UpperCAmelCase : Union[str, Any] = kwargs
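# The config wrapper above resolves dotted keys like "zero_optimization.stage"
# by walking the nested config dict. A self-contained sketch of that lookup
# logic (an illustrative reimplementation, not an import from this module):
if __name__ == "__main__":
    def get_value(config , ds_key_long , default=None ):
        node = config
        for key in ds_key_long.split(""".""" ):
            if not isinstance(node , dict ) or key not in node:
                return default
            node = node[key]
        return node
    ds_config = {"""zero_optimization""": {"""stage""": 3, """offload_param""": {"""device""": """cpu"""}}}
    assert get_value(ds_config , """zero_optimization.stage""" ) == 3
    assert get_value(ds_config , """zero_optimization.offload_param.device""" ) == """cpu"""
    assert get_value(ds_config , """optimizer.type""" , default="""AdamW""" ) == """AdamW"""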
| 701 |
'''simple docstring'''
def is_pangram( input_str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(""" """ , """""" )
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower() )
    return len(frequency ) == 2_6
def is_pangram_faster( input_str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    letter_seen = [False] * 2_6
    for char in input_str:
        if char.islower():
            letter_seen[ord(char ) - ord("""a""" )] = True
        elif char.isupper():
            letter_seen[ord(char ) - ord("""A""" )] = True
    return all(letter_seen )
def is_pangram_fastest( input_str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def benchmark( ) -> None:
    """Benchmark the three implementations against each other."""
    from timeit import timeit
    setup = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
    print(timeit("""is_pangram()""" , setup=setup ) )
    print(timeit("""is_pangram_faster()""" , setup=setup ) )
    print(timeit("""is_pangram_fastest()""" , setup=setup ) )
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 672 | 0 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class UpperCamelCase_ ( __snake_case , __snake_case ):
lowercase = 'pixel_values'
lowercase = False
lowercase = TimmBackboneConfig
def __init__( self , A , **A ) -> Tuple:
requires_backends(self , """timm""" )
super().__init__(__UpperCamelCase )
UpperCAmelCase : Optional[Any] = config
if config.backbone is None:
raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" )
if config.backbone not in timm.list_models():
raise ValueError(f'''backbone {config.backbone} is not supported by timm.''' )
if hasattr(__UpperCamelCase , """out_features""" ) and config.out_features is not None:
raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" )
UpperCAmelCase : Optional[Any] = getattr(__UpperCamelCase , """use_pretrained_backbone""" , __UpperCamelCase )
if pretrained is None:
raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" )
# We just take the final layer by default. This matches the default for the transformers models.
UpperCAmelCase : Dict = config.out_indices if getattr(__UpperCamelCase , """out_indices""" , __UpperCamelCase ) is not None else (-1,)
UpperCAmelCase : Any = timm.create_model(
config.backbone , pretrained=__UpperCamelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=__UpperCamelCase , **__UpperCamelCase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
UpperCAmelCase : int = self._backbone.return_layers
UpperCAmelCase : Dict = {layer["""module"""]: str(__UpperCamelCase ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(__UpperCamelCase )
@classmethod
def _lowercase( cls , A , *A , **A ) -> List[str]:
requires_backends(cls , ["""vision""", """timm"""] )
from ...models.timm_backbone import TimmBackboneConfig
UpperCAmelCase : int = kwargs.pop("""config""" , TimmBackboneConfig() )
UpperCAmelCase : str = kwargs.pop("""use_timm_backbone""" , __UpperCamelCase )
if not use_timm:
raise ValueError("""use_timm_backbone must be True for timm backbones""" )
UpperCAmelCase : Dict = kwargs.pop("""num_channels""" , config.num_channels )
UpperCAmelCase : Optional[Any] = kwargs.pop("""features_only""" , config.features_only )
UpperCAmelCase : Tuple = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone )
UpperCAmelCase : Any = kwargs.pop("""out_indices""" , config.out_indices )
UpperCAmelCase : Union[str, Any] = TimmBackboneConfig(
backbone=__UpperCamelCase , num_channels=__UpperCamelCase , features_only=__UpperCamelCase , use_pretrained_backbone=__UpperCamelCase , out_indices=__UpperCamelCase , )
return super()._from_config(__UpperCamelCase , **__UpperCamelCase )
def _lowercase( self , A ) -> int:
pass
def _lowercase( self , A , A=None , A=None , A=None , **A ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase : Any = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("""Cannot output attentions for timm backbones at the moment""" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
UpperCAmelCase : int = self._all_layers
UpperCAmelCase : List[str] = self._backbone(__UpperCamelCase , **__UpperCamelCase )
UpperCAmelCase : Tuple = self._return_layers
UpperCAmelCase : Any = tuple(hidden_states[i] for i in self.out_indices )
else:
UpperCAmelCase : List[Any] = self._backbone(__UpperCamelCase , **__UpperCamelCase )
UpperCAmelCase : Any = None
UpperCAmelCase : Optional[int] = tuple(__UpperCamelCase )
UpperCAmelCase : List[Any] = tuple(__UpperCamelCase ) if hidden_states is not None else None
if not return_dict:
UpperCAmelCase : int = (feature_maps,)
if output_hidden_states:
UpperCAmelCase : Union[str, Any] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=__UpperCamelCase , hidden_states=__UpperCamelCase , attentions=__UpperCamelCase )
| 702 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
    def _info( self ) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
                """https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
            ] , )
    def _get_feature_types( self ) -> dict:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ) -> dict:
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
        return {"mse": mse}
| 672 | 0 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type( model_name_or_path ):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths( metric_fn , prediction , ground_truths ):
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
def get_scores( args , preds_path , gold_data_path ):
    hypos = [line.strip() for line in open(preds_path , """r""" ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep="""\t""" , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , """r""" ).readlines()]
        answers = [[reference] for reference in references]
    fa = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        fa += metric_max_over_ground_truths(fa_score , prediction , ground_truths )
    em = 100.0 * em / total
    fa = 100.0 * fa / total
    logger.info(F'''F1: {fa:.2f}''' )
    logger.info(F'''EM: {em:.2f}''' )
def get_precision_at_k( args , preds_path , gold_data_path ):
    k = args.k
    hypos = [line.strip() for line in open(preds_path , """r""" ).readlines()]
    references = [line.strip() for line in open(gold_data_path , """r""" ).readlines()]
    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split("""\t""" )[:k] )
        ref_provenance = set(reference.split("""\t""" ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em = 100.0 * em / total
    logger.info(F'''Precision@{k}: {em: .2f}''' )
def evaluate_batch_retrieval( args , rag_model , questions ):
    def strip_title(title ):
        if title.startswith("""\"""" ):
            title = title[1:]
        if title.endswith("""\"""" ):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors="""pt""" , padding=True , truncation=True , )["""input_ids"""].to(args.device )
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs["""title"""]]
        provenance_strings.append("""\t""".join(provenance ) )
    return provenance_strings
def evaluate_batch_e2e( args , rag_model , questions ):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors="""pt""" , padding=True , truncation=True )
        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )
        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info("""Q: {} - A: {}""".format(q , a ) )
        return answers
def get_args( ):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=str , help=(
            """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
            """ model_name_or_path"""
        ) , )
    parser.add_argument(
        """--index_name""" , default=None , choices=["""exact""", """compressed""", """legacy"""] , type=str , help="""RAG model retriever type""" , )
    parser.add_argument(
        """--index_path""" , default=None , type=str , help="""Path to the retrieval index""" , )
    parser.add_argument("""--n_docs""" , default=5 , type=int , help="""Number of retrieved docs""" )
    parser.add_argument(
        """--model_name_or_path""" , default=None , type=str , required=True , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , )
    parser.add_argument(
        """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=str , help=(
            """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
            """ precision@k."""
        ) , )
    parser.add_argument("""--k""" , default=1 , type=int , help="""k for the precision@k calculation""" )
    parser.add_argument(
        """--evaluation_set""" , default=None , type=str , required=True , help="""Path to a file containing evaluation samples""" , )
    parser.add_argument(
        """--gold_data_path""" , default=None , type=str , required=True , help="""Path to a tab-separated file with gold samples""" , )
    parser.add_argument(
        """--gold_data_mode""" , default="""qa""" , type=str , choices=["""qa""", """ans"""] , help=(
            """Format of the gold data file"""
            """qa - a single line in the following format: question [tab] answer_list"""
            """ans - a single line of the gold file contains the expected answer string"""
        ) , )
    parser.add_argument(
        """--predictions_path""" , type=str , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , )
    parser.add_argument(
        """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , )
    parser.add_argument(
        """--eval_batch_size""" , default=8 , type=int , help="""Batch size per GPU/CPU for evaluation.""" , )
    parser.add_argument(
        """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , )
    parser.add_argument(
        """--num_beams""" , default=4 , type=int , help="""Number of beams to be used when generating answers""" , )
    parser.add_argument("""--min_length""" , default=1 , type=int , help="""Min length of the generated answers""" )
    parser.add_argument("""--max_length""" , default=50 , type=int , help="""Max length of the generated answers""" )
    parser.add_argument(
        """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , )
    parser.add_argument(
        """--print_docs""" , action="""store_true""" , help="""If True, prints docs retrieved while generating.""" , )
    args = parser.parse_args()
    args.device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
    return args
def main( args ):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith("""rag""" ):
        model_class = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
        model_kwargs["""n_docs"""] = args.n_docs
        if args.index_name is not None:
            model_kwargs["""index_name"""] = args.index_name
        if args.index_path is not None:
            model_kwargs["""index_path"""] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("""Evaluate the following checkpoints: %s""" , checkpoints )
    score_fn = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == """e2e""" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info("""***** Running evaluation for {} *****""".format(checkpoint ) )
        logger.info(""" Batch size = %d""" , args.eval_batch_size )
        logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) )
        if args.model_type.startswith("""rag""" ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write("""\n""".join(answers ) + """\n""" )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write("""\n""".join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    args = get_args()
main(args)
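# A hypothetical end-to-end invocation of this evaluation script; every path
# below is a placeholder, and the checkpoint name is just one public RAG model:
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --model_type rag_token \
#       --evaluation_set path/to/test.questions \
#       --gold_data_path path/to/gold_answers.tsv \
#       --gold_data_mode qa \
#       --eval_mode e2e \
#       --predictions_path path/to/preds.txt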
| 703 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'fnet'
def __init__( self , A=32000 , A=768 , A=12 , A=3072 , A="gelu_new" , A=0.1 , A=512 , A=4 , A=0.0_2 , A=1e-12 , A=False , A=512 , A=3 , A=1 , A=2 , **A , ) -> int:
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Dict = max_position_embeddings
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : int = num_hidden_layers
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : List[Any] = type_vocab_size
UpperCAmelCase : int = layer_norm_eps
UpperCAmelCase : Optional[Any] = use_tpu_fourier_optimizations
UpperCAmelCase : List[Any] = tpu_short_seq_length
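

if __name__ == "__main__":
    # Hedged demo (added, not part of the original module): round-trip the
    # config through the standard PretrainedConfig API. The defaults above
    # correspond to google/fnet-base.
    config = FNetConfig()
    print(config.model_type, config.hidden_size, config.num_hidden_layers)  # fnet 768 12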
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Return the dims of `tensor`, preferring static ints and falling back to dynamic scalar tensors."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
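

# Hedged demo (added, not part of the upstream module): `shape_list` mixes
# static Python ints with dynamic scalar tensors, so downstream reshapes keep
# working when the batch dimension is unknown at trace time.
def _shape_list_demo() -> None:
    assert shape_list(tf.zeros((2, 3))) == [2, 3]  # fully static -> plain ints

    @tf.function(input_signature=[tf.TensorSpec([None, 3])])
    def traced(x: tf.Tensor) -> tf.Tensor:
        dims = shape_list(x)  # -> [<scalar tf.Tensor>, 3]
        assert isinstance(dims[1], int) and not isinstance(dims[0], int)
        return x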
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # The tiny constant leaves the result effectively unchanged while working
    # around an XLA compilation bug that plain tf.nn.softmax can trigger.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    """Duplicate torch.nn.functional.layer_norm for a 1D weight/bias over a single axis."""
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs
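

# Hedged reference (added, not part of the upstream module): what the helper
# above computes for the common axis=-1 case, written out directly.
def _layernorm_reference(x: tf.Tensor, weight: tf.Tensor, bias: tf.Tensor, epsilon: float = 1e-5) -> tf.Tensor:
    mean, variance = tf.nn.moments(x, axes=[-1], keepdims=True)
    return weight * (x - mean) / tf.sqrt(variance + epsilon) + bias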
def flatten(input, start_dim=0, end_dim=-1):
    """Replicate torch.flatten(input, start_dim, end_dim) for TF tensors."""
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
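

# Hedged demo (added, not part of the upstream module): `flatten` follows
# torch.flatten semantics, collapsing dims start_dim..end_dim inclusive.
# Shapes below assume eager execution on concrete tensors.
def _flatten_demo() -> None:
    x = tf.zeros((2, 3, 4, 5))
    assert flatten(x, start_dim=1, end_dim=2).shape == (2, 12, 5)
    assert flatten(x).shape == (2 * 3 * 4 * 5,)  # defaults flatten everything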
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """Turn a 1/0 attention mask into a broadcastable additive bias (0 to keep, dtype min to mask)."""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
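

# Hedged demo (added, not part of the upstream module): a 1/0 padding mask
# becomes an additive bias whose padded slots hold the dtype minimum, so they
# vanish after a softmax.
def _invert_mask_demo() -> None:
    mask = tf.constant([[1.0, 1.0, 0.0]])  # rank 2: (batch, seq)
    bias = invert_attention_mask(mask)     # rank 4: (batch, 1, 1, seq)
    assert bias.shape == (1, 1, 1, 3)
    assert float(bias[0, 0, 0, 0]) == 0.0                      # kept position
    assert float(bias[0, 0, 0, 2]) == float(tf.float32.min)    # masked position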
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    """Save attributes to an HDF5 group, chunking lists that exceed the header limit."""
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
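

# Hedged demo (added, not part of the upstream module): the chunking math in
# isolation. HDF5 caps a single attribute at roughly 64 KiB, so a long list of
# names is split across "name0", "name1", ... attributes and re-joined by the
# loader below.
def _chunking_demo() -> None:
    data = np.asarray([f"layer_{i}/kernel:0" for i in range(50_000)])
    num_chunks = 1
    chunks = np.array_split(data, num_chunks)
    while any(x.nbytes > 64512 for x in chunks):
        num_chunks += 1
        chunks = np.array_split(data, num_chunks)
    assert num_chunks > 1 and sum(len(c) for c in chunks) == len(data)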
def load_attributes_from_hdf5_group(group, name):
    """Load attributes from an HDF5 group, re-joining chunked "name0", "name1", ... attributes."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def expand_1d(data):
    """Expand rank-1 tensors in a nested structure to rank 2 (Keras loss/metric convention)."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
'''simple docstring'''
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized integer expression with Dijkstra's two-stack algorithm."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: at a closing paren, pop one operator and two operands,
            # apply the operator, and push the result back
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1, num2)
            operand_stack.push(total)

    # RULE 5: the single value left on the operand stack is the answer
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
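    # Worked trace (added): the inner groups reduce first at their closing
    # parens, (2 + 3) -> 5 and (4 * 2) -> 8, then 8 * 5 -> 40 and 5 + 40 -> 45.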
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
a : Any = """bart"""
a : int = True
@st.cache(allow_output_mutation=a_ )
def __lowerCamelCase ( ) -> List[Any]:
if LOAD_DENSE_INDEX:
UpperCAmelCase : Any = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
UpperCAmelCase : Dict = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
UpperCAmelCase : Any = qar_model.eval()
else:
UpperCAmelCase : Tuple = (None, None)
if MODEL_TYPE == "bart":
UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
UpperCAmelCase : Tuple = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
UpperCAmelCase : Optional[int] = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
UpperCAmelCase : str = sas_model.eval()
else:
UpperCAmelCase : int = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=a_ )
def __lowerCamelCase ( ) -> Optional[Any]:
if LOAD_DENSE_INDEX:
UpperCAmelCase : Union[str, Any] = faiss.StandardGpuResources()
UpperCAmelCase : Optional[int] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )['''train''']
UpperCAmelCase : Dict = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 1_2_8) , )
UpperCAmelCase : List[str] = faiss.IndexFlatIP(1_2_8 )
UpperCAmelCase : List[str] = faiss.index_cpu_to_gpu(a_ , 1 , a_ )
wikiaab_gpu_index_flat.add(a_ ) # TODO fix for larger GPU
else:
UpperCAmelCase : Any = (None, None)
UpperCAmelCase : Union[str, Any] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=a_ )
def __lowerCamelCase ( ) -> Any:
UpperCAmelCase : Optional[Any] = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
UpperCAmelCase : List[Any] = elia['''train_eli5''']
UpperCAmelCase : Optional[int] = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 1_2_8) )
UpperCAmelCase : str = faiss.IndexFlatIP(1_2_8 )
eli5_train_q_index.add(a_ )
return (elia_train, eli5_train_q_index)
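

# Note (added): each loader above is wrapped in `st.cache` with
# `allow_output_mutation=True`, so the heavyweight models and FAISS indexes are
# built once per Streamlit process instead of on every widget-triggered rerun.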
a , a , a : Any = load_indexes()
a , a , a , a : Union[str, Any] = load_models()
a , a : Optional[int] = load_train_data()
def __lowerCamelCase ( _lowercase , _lowercase=1_0 ) -> Dict:
UpperCAmelCase : Any = embed_questions_for_retrieval([question] , a_ , a_ )
UpperCAmelCase : Any = eli5_train_q_index.search(a_ , a_ )
UpperCAmelCase : Dict = [elia_train[int(a_ )] for i in I[0]]
return nn_examples
def __lowerCamelCase ( _lowercase , _lowercase="wiki40b" , _lowercase="dense" , _lowercase=1_0 ) -> List[Any]:
if source == "none":
UpperCAmelCase : int = (''' <P> '''.join(["""""" for _ in range(1_1 )] ).strip(), [])
else:
if method == "dense":
UpperCAmelCase : List[str] = query_qa_dense_index(
a_ , a_ , a_ , a_ , a_ , a_ )
else:
UpperCAmelCase : Dict = query_es_index(
a_ , a_ , index_name="""english_wiki40b_snippets_100w""" , n_results=a_ , )
UpperCAmelCase : Any = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
UpperCAmelCase : List[Any] = '''question: {} context: {}'''.format(a_ , a_ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowercase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowercase : None),
} )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase=6_4 , _lowercase=2_5_6 , _lowercase=False , _lowercase=2 , _lowercase=0.95 , _lowercase=0.8 ) -> Dict:
with torch.no_grad():
UpperCAmelCase : Optional[Any] = qa_sas_generate(
a_ , a_ , a_ , num_answers=1 , num_beams=a_ , min_len=a_ , max_len=a_ , do_sample=a_ , temp=a_ , top_p=a_ , top_k=a_ , max_input_length=1_0_2_4 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
a : Any = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
a : Optional[int] = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
a : Dict = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
a : Optional[Any] = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
a : Optional[Any] = st.sidebar.checkbox("""Demo options""")
if demo_options:
a : str = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
a : Optional[int] = action_list.index(action_st)
a : List[str] = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
a : Dict = show_type == """Show full text of passages"""
else:
a : Any = 3
a : Tuple = True
a : Dict = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
a : List[Any] = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
a : Tuple = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
a : List[Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
a : Union[str, Any] = """wiki40b"""
a : Tuple = """dense"""
a : Dict = """beam"""
a : Union[str, Any] = 2
a : Optional[int] = 6_4
a : Tuple = 2_5_6
a : List[str] = None
a : List[Any] = None
a : Any = st.sidebar.checkbox("""Generation options""")
if generate_options:
a : Tuple = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
a : Optional[int] = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
a : int = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None
)
a : int = st.sidebar.slider(
"""Maximum generation length""", min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None
)
if sampled == "beam":
a : Tuple = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
a : Optional[Any] = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
a : Union[str, Any] = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
a : Optional[Any] = None
# start main text
a : int = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
a : Optional[Any] = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
a : str = st.text_input("""Enter your question here:""", """""")
else:
a : List[Any] = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
a , a : Tuple = make_support(question, source=wiki_source, method="""dense""", n_results=1_0)
a , a : List[str] = make_support(question, source=wiki_source, method="""sparse""", n_results=1_0)
a : Dict = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
a : int = support_list[:1_0]
a : Optional[Any] = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
a , a : Union[str, Any] = make_support(question, source=wiki_source, method=index_type, n_results=1_0)
if action in [0, 3]:
a , a : List[str] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
a : Tuple = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
a : Tuple = res[1].strip()
if sec_titles == "":
a : int = """[{}]({})""".format(res[0], wiki_url)
else:
a : Dict = sec_titles.split(""" & """)
a : List[Any] = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
a : int = find_nearest_training(question)
a : str = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
a : Union[str, Any] = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
a : int = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a : List[str] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
UpperCAmelCase : List[str] = state_dict.pop(_lowercase )
UpperCAmelCase : List[str] = val
def __lowerCamelCase ( _lowercase ) -> Any:
UpperCAmelCase : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCAmelCase : List[str] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
UpperCAmelCase : Dict = value
else:
UpperCAmelCase : List[Any] = value
return new_state_dict
def __lowerCamelCase ( _lowercase , _lowercase=False ) -> Optional[int]:
UpperCAmelCase : Dict = """"""
if is_panoptic:
UpperCAmelCase : Tuple = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCAmelCase : List[Any] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCAmelCase : List[Any] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : Dict = in_proj_weight[:2_5_6, :]
UpperCAmelCase : Optional[Any] = in_proj_bias[:2_5_6]
UpperCAmelCase : List[Any] = in_proj_weight[2_5_6:5_1_2, :]
UpperCAmelCase : Tuple = in_proj_bias[2_5_6:5_1_2]
UpperCAmelCase : List[str] = in_proj_weight[-2_5_6:, :]
UpperCAmelCase : List[str] = in_proj_bias[-2_5_6:]
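

# Hedged sketch (added, not part of the conversion script): how a fused
# MultiHeadAttention `in_proj_weight` of shape (3 * d, d) splits into the
# separate q/k/v projections used above, for the hidden size d = 256.
def _split_qkv_demo() -> None:
    fused_w = torch.randn(3 * 256, 256)
    q_w, k_w, v_w = fused_w[:256, :], fused_w[256:512, :], fused_w[-256:, :]
    assert q_w.shape == k_w.shape == v_w.shape == (256, 256)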
def __lowerCamelCase ( ) -> Dict:
UpperCAmelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase : Tuple = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
UpperCAmelCase : str = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
UpperCAmelCase : List[Any] = """resnet101"""
if "dc5" in model_name:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : List[Any] = """panoptic""" in model_name
if is_panoptic:
UpperCAmelCase : Union[str, Any] = 2_5_0
else:
UpperCAmelCase : int = 9_1
UpperCAmelCase : Tuple = """huggingface/label-files"""
UpperCAmelCase : List[Any] = """coco-detection-id2label.json"""
UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase : Dict = {int(_lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase : Optional[Any] = idalabel
UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
# load image processor
UpperCAmelCase : List[str] = """coco_panoptic""" if is_panoptic else """coco_detection"""
UpperCAmelCase : List[Any] = ConditionalDetrImageProcessor(format=_lowercase )
# prepare image
UpperCAmelCase : Union[str, Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=_lowercase , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = encoding["""pixel_values"""]
logger.info(F'''Converting model {model_name}...''' )
# load original model from torch hub
UpperCAmelCase : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , _lowercase , pretrained=_lowercase ).eval()
UpperCAmelCase : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
UpperCAmelCase : List[Any] = """conditional_detr.""" + src
rename_key(_lowercase , _lowercase , _lowercase )
UpperCAmelCase : List[Any] = rename_backbone_keys(_lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowercase , is_panoptic=_lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase : int = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
UpperCAmelCase : Union[str, Any] = state_dict.pop(_lowercase )
UpperCAmelCase : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCAmelCase : Any = state_dict.pop(_lowercase )
UpperCAmelCase : Optional[Any] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
UpperCAmelCase : List[Any] = state_dict.pop(_lowercase )
UpperCAmelCase : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
UpperCAmelCase : Optional[int] = state_dict.pop(_lowercase )
UpperCAmelCase : Union[str, Any] = val
# finally, create HuggingFace model and load state dict
UpperCAmelCase : List[Any] = ConditionalDetrForSegmentation(_lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
model.push_to_hub(repo_id=_lowercase , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
UpperCAmelCase : Union[str, Any] = conditional_detr(_lowercase )
UpperCAmelCase : int = model(_lowercase )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 )
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
a : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __lowerCamelCase ( _lowercase ) -> Union[str, Any]:
UpperCAmelCase : List[str] = 3_8_4
if "tiny" in model_name:
UpperCAmelCase : Any = [3, 3, 9, 3]
UpperCAmelCase : Union[str, Any] = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "small" in model_name:
UpperCAmelCase : Union[str, Any] = [3, 3, 2_7, 3]
UpperCAmelCase : str = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "base" in model_name:
UpperCAmelCase : Union[str, Any] = [3, 3, 2_7, 3]
UpperCAmelCase : List[str] = [1_2_8, 2_5_6, 5_1_2, 1_0_2_4]
UpperCAmelCase : List[str] = 5_1_2
if "large" in model_name:
UpperCAmelCase : Tuple = [3, 3, 2_7, 3]
UpperCAmelCase : Optional[Any] = [1_9_2, 3_8_4, 7_6_8, 1_5_3_6]
UpperCAmelCase : Union[str, Any] = 7_6_8
if "xlarge" in model_name:
UpperCAmelCase : Dict = [3, 3, 2_7, 3]
UpperCAmelCase : Optional[int] = [2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8]
UpperCAmelCase : List[Any] = 1_0_2_4
# set label information
UpperCAmelCase : Tuple = 1_5_0
UpperCAmelCase : Optional[Any] = """huggingface/label-files"""
UpperCAmelCase : List[Any] = """ade20k-id2label.json"""
UpperCAmelCase : Union[str, Any] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase : Optional[Any] = {int(_lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase : int = {v: k for k, v in idalabel.items()}
UpperCAmelCase : Union[str, Any] = ConvNextConfig(
depths=_lowercase , hidden_sizes=_lowercase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
UpperCAmelCase : Union[str, Any] = UperNetConfig(
backbone_config=_lowercase , auxiliary_in_channels=_lowercase , num_labels=_lowercase , idalabel=_lowercase , labelaid=_lowercase , )
return config
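

# Hedged sketch (added, not part of the conversion script): the same
# composition via the public API imported at the top of this file, a ConvNext
# backbone exposing all four stages feeding a 150-class UperNet head, using
# the "tiny" sizes produced by the config builder above.
def _build_tiny_upernet() -> UperNetForSemanticSegmentation:
    backbone_config = ConvNextConfig(
        depths=[3, 3, 9, 3],
        hidden_sizes=[96, 192, 384, 768],
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(backbone_config=backbone_config, auxiliary_in_channels=384, num_labels=150)
    return UperNetForSemanticSegmentation(config)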
def __lowerCamelCase ( _lowercase ) -> Dict:
UpperCAmelCase : Dict = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Dict:
UpperCAmelCase : Optional[Any] = dct.pop(_lowercase )
UpperCAmelCase : List[Any] = val
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> str:
UpperCAmelCase : int = {
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
UpperCAmelCase : Optional[Any] = model_name_to_url[model_name]
UpperCAmelCase : int = torch.hub.load_state_dict_from_url(_lowercase , map_location="""cpu""" )["""state_dict"""]
UpperCAmelCase : int = get_upernet_config(_lowercase )
UpperCAmelCase : List[str] = UperNetForSemanticSegmentation(_lowercase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
UpperCAmelCase : Union[str, Any] = state_dict.pop(_lowercase )
if "bn" in key:
UpperCAmelCase : List[Any] = key.replace("""bn""" , """batch_norm""" )
UpperCAmelCase : str = val
# rename keys
UpperCAmelCase : int = create_rename_keys(_lowercase )
for src, dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
model.load_state_dict(_lowercase )
# verify on image
UpperCAmelCase : Optional[Any] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
UpperCAmelCase : List[Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert("""RGB""" )
UpperCAmelCase : int = SegformerImageProcessor()
UpperCAmelCase : Any = processor(_lowercase , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
UpperCAmelCase : List[Any] = model(_lowercase )
if model_name == "upernet-convnext-tiny":
UpperCAmelCase : Tuple = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
UpperCAmelCase : List[Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
UpperCAmelCase : List[Any] = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , _lowercase , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F'''upernet-convnext-{size}''' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
a : int = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
a : Tuple = ["""model.decoder.embed_positions.weights"""]
def __lowerCamelCase ( _lowercase ) -> Optional[Any]:
if "emb" in name:
UpperCAmelCase : List[str] = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
UpperCAmelCase : Optional[int] = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
UpperCAmelCase : Optional[int] = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
UpperCAmelCase : Optional[Any] = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
UpperCAmelCase : Union[str, Any] = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
UpperCAmelCase : Optional[Any] = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
UpperCAmelCase : Dict = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
UpperCAmelCase : Union[str, Any] = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
UpperCAmelCase : Union[str, Any] = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
UpperCAmelCase : List[str] = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
UpperCAmelCase : Any = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
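

# Hedged examples (added) of the mapping implemented above:
#   "emb.0.weight"                           -> "model.decoder.embed_tokens.0.weight"
#   "transformer.layers.0.linear1.weight"    -> "model.decoder.layers.0.fc1.weight"
#   "transformer.layers.0.norm_cross.weight" -> "model.decoder.layers.0.encoder_attn_layer_norm.weight"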
def __lowerCamelCase ( _lowercase , _lowercase ) -> Tuple[Dict, Dict]:
UpperCAmelCase : str = list(state_dict.keys() )
UpperCAmelCase : Optional[Any] = {}
for key in keys:
UpperCAmelCase : Optional[int] = state_dict.pop(lowercase_ )
UpperCAmelCase : List[Any] = rename_keys(lowercase_ )
if "in_proj_weight" in key:
# split fused qkv proj
UpperCAmelCase : Tuple = val[:hidden_size, :]
UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
UpperCAmelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCAmelCase : Optional[Any] = val
else:
UpperCAmelCase : List[str] = val
return state_dict, enc_dec_proj_state_dict
def __lowerCamelCase ( _lowercase ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
UpperCAmelCase : List[Any] = 1_0_2_4
UpperCAmelCase : List[str] = 2_4
UpperCAmelCase : Any = 1_6
elif checkpoint == "medium":
UpperCAmelCase : Tuple = 1_5_3_6
UpperCAmelCase : Dict = 4_8
UpperCAmelCase : Tuple = 2_4
elif checkpoint == "large":
UpperCAmelCase : int = 2_0_4_8
UpperCAmelCase : Optional[int] = 4_8
UpperCAmelCase : Dict = 3_2
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
UpperCAmelCase : str = MusicgenDecoderConfig(
hidden_size=lowercase_ , ffn_dim=hidden_size * 4 , num_hidden_layers=lowercase_ , num_attention_heads=lowercase_ , )
return config
@torch.no_grad()
def __lowerCamelCase ( _lowercase , _lowercase=None , _lowercase=None , _lowercase="cpu" ) -> List[str]:
UpperCAmelCase : str = MusicGen.get_pretrained(lowercase_ , device=lowercase_ )
UpperCAmelCase : Union[str, Any] = decoder_config_from_checkpoint(lowercase_ )
UpperCAmelCase : Optional[int] = fairseq_model.lm.state_dict()
UpperCAmelCase : Optional[Any] = rename_state_dict(
lowercase_ , hidden_size=decoder_config.hidden_size )
UpperCAmelCase : Tuple = TaEncoderModel.from_pretrained("""t5-base""" )
UpperCAmelCase : Union[str, Any] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
UpperCAmelCase : str = MusicgenForCausalLM(lowercase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
UpperCAmelCase : str = decoder.load_state_dict(lowercase_ , strict=lowercase_ )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(lowercase_ ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
UpperCAmelCase : str = MusicgenForConditionalGeneration(text_encoder=lowercase_ , audio_encoder=lowercase_ , decoder=lowercase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase_ )
# check we can do a forward pass
UpperCAmelCase : List[str] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
UpperCAmelCase : Dict = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
UpperCAmelCase : Tuple = model(input_ids=lowercase_ , decoder_input_ids=lowercase_ ).logits
if logits.shape != (8, 1, 2_0_4_8):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
UpperCAmelCase : int = AutoTokenizer.from_pretrained("""t5-base""" )
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
UpperCAmelCase : Optional[int] = MusicgenProcessor(feature_extractor=lowercase_ , tokenizer=lowercase_ )
# set the appropriate bos/pad token ids
UpperCAmelCase : str = 2_0_4_8
UpperCAmelCase : str = 2_0_4_8
# set other default generation config params
UpperCAmelCase : Optional[Any] = int(3_0 * audio_encoder.config.frame_rate )
UpperCAmelCase : List[str] = True
UpperCAmelCase : int = 3.0
if pytorch_dump_folder is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(lowercase_ )
processor.push_to_hub(lowercase_ )
if __name__ == "__main__":
a : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
a : Optional[int] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
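    # Hedged example (added): for a sentence pair (A, B) the method above
    # produces `<s> A </s> </s> B </s>`, the RoBERTa-style layout that
    # BARThez inherits.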
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
'''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording even-sized subtree roots in `cuts`."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
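    # Note (added): dfs() records every vertex whose subtree has an even number
    # of nodes, including the root (n = 10 is even), so the count of removable
    # edges is len(cuts) - 1, which is 2 for this sample tree.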
print(len(cuts) - 1)
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def heun(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Integrate y' = ode_func(x, y) with y(x0) = y0 up to x_end using Heun's (improved Euler) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor: a plain explicit-Euler step.
        y_pred = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at both ends of the interval.
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        x += step_size

    return y
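

# Hedged usage sketch (added): integrating y' = y from x = 0 with y(0) = 1
# approximates e at x = 1; the trapezoidal corrector makes Heun second-order
# accurate. Keyword names follow the restored signature above.
def _heun_demo() -> None:
    ys = heun(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
    assert abs(ys[-1] - np.e) < 1e-3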
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="""gelu""",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["""stem"""]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def _lowercase( self ) -> List[str]:
pass
def _lowercase( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase( self ) -> List[Any]:
return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("""Swin does not use inputs_embeds""" )
def _lowercase( self ) -> List[str]:
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def _lowercase( self ) -> Any:
pass
def _lowercase( self ) -> List[str]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[str] = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _lowercase( self ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : str = model_class(_lowerCAmelCase )
UpperCAmelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : str = [*signature.parameters.keys()]
UpperCAmelCase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def _lowercase( self ) -> List[str]:
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def _lowercase( self ) -> List[str]:
pass
def _lowercase( self , A , A , A , A ) -> Optional[int]:
UpperCAmelCase : List[Any] = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
UpperCAmelCase : List[Any] = outputs.hidden_states
UpperCAmelCase : Any = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# Swin has a different seq_length
UpperCAmelCase : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase : Any = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : str = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = 3
UpperCAmelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : int = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def _lowercase( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase( self ) -> List[str]:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase( self ) -> Tuple:
pass
def _lowercase( self ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t
def check_equivalence(A , A , A , A={} ):
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(**_lowerCAmelCase , return_dict=_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase : Optional[Any] = model(**_lowerCAmelCase , return_dict=_lowerCAmelCase , **_lowerCAmelCase ).to_tuple()
def recursive_check(A , A ):
if isinstance(_lowerCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCAmelCase , _lowerCAmelCase ):
recursive_check(_lowerCAmelCase , _lowerCAmelCase )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_lowerCAmelCase , _lowerCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_lowerCAmelCase ) , set_nan_tensor_to_zero(_lowerCAmelCase ) , atol=1e-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
f''' {torch.isnan(_lowerCAmelCase ).any()} and `inf`: {torch.isinf(_lowerCAmelCase )}. Dict has'''
f''' `nan`: {torch.isnan(_lowerCAmelCase ).any()} and `inf`: {torch.isinf(_lowerCAmelCase )}.'''
) , )
recursive_check(_lowerCAmelCase , _lowerCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase : Optional[Any] = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : List[Any] = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : Any = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
UpperCAmelCase : Dict = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : str = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : str = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {"""output_hidden_states""": True} )
UpperCAmelCase : Optional[int] = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
UpperCAmelCase : str = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {"""output_hidden_states""": True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["""pixel_values"""].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
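# Hedged side note (not part of the test file): the shape arithmetic used in
# create_and_check_model, spelled out for the tester defaults
# (image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]):
#   expected_seq_len = ((32 // 2) ** 2) // (4 ** (3 - 1)) = 256 // 16 = 16
#   expected_dim = 16 * 2 ** (3 - 1) = 64
# i.e. the last hidden state has shape (batch_size, 16, 64).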
| 709 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(""",""") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("""You must include at least one label and at least one sequence.""")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    """The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
                    """Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
                ).format(hypothesis_template))
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                """Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
                """-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("""entail""" ):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                """Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
                """ `pad_token=eos_token`""" )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e
        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("""multi_class""", None) is not None:
            kwargs["""multi_label"""] = kwargs["""multi_class"""]
            logger.warning(
                """The `multi_class` argument has been deprecated and renamed to `multi_label`. """
                """`multi_class` will be removed in a future version of Transformers.""" )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["""candidate_labels"""] = self._args_parser._parse_labels(kwargs["""candidate_labels"""])
        if "hypothesis_template" in kwargs:
            preprocess_params["""hypothesis_template"""] = kwargs["""hypothesis_template"""]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["""multi_label"""] = kwargs["""multi_label"""]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["""candidate_labels"""] = args[0]
        else:
            raise ValueError(f'''Unable to understand extra arguments {args}''' )
        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["""candidate_label"""]
        sequence = inputs["""sequence"""]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)
        model_outputs = {
            """candidate_label""": candidate_label,
            """sequence""": sequence,
            """is_last""": inputs["""is_last"""],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["""candidate_label"""] for outputs in model_outputs]
        sequences = [outputs["""sequence"""] for outputs in model_outputs]
        logits = np.concatenate([output["""logits"""].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))
        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
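# Hedged usage sketch (not part of the original module): the supported entry
# point is ``transformers.pipeline``; the checkpoint name below is only an
# illustrative choice of an NLI model.
# from transformers import pipeline
# classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
# classifier(
#     "I love hiking in the mountains",
#     candidate_labels=["travel", "cooking", "sports"],
#     hypothesis_template="This example is {}.",
# )
# # -> {"sequence": ..., "labels": [... sorted by score ...], "scores": [...]}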
| 672 | 0 |
'''simple docstring'''
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every ordered combination of words from ``word_bank`` that concatenates to ``target``."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
    print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
    print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
    print(
        all_construct(
            """hexagonosaurus""",
            ["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
        )
    )
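    # Hedged sanity check (not part of the original script): a minimal case
    # that is easy to verify by hand.
    assert all_construct("aa", ["a"]) == [["a", "a"]]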
| 710 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("""google/pegasus-large""")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        token = """</s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], """<pad>""")
        self.assertEqual(vocab_keys[1], """</s>""")
        self.assertEqual(vocab_keys[-1], """v""")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            """Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
            """ </s> <pad> <pad> <pad>"""
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = """To ensure a smooth flow of bank resolutions."""
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["""This is going to be way too long.""" * 150, """short example"""]
        tgt_texts = ["""not super long but more than 5 tokens""", """tiny"""]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="""pt""")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="""pt""")
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
def _lowercase( self ) -> List[str]:
# fmt: off
UpperCAmelCase : List[str] = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="""[MASK]""")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
            """ <pad> <pad> <pad>"""
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["""This is going to be way too long.""" * 1000, """short example"""]
        tgt_texts = ["""not super long but more than 5 tokens""", """tiny"""]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="""pt""")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="""pt""")
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        raw_str = (
            """This is an example string that is used to test the original TF implementation against the HF"""
            """ implementation"""
        )
        token_ids = self._large_tokenizer(raw_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
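# Hedged side note (not part of the test file): Pegasus reserves the low ids
# for special tokens, so an underlying SentencePiece id maps to a model id by
# a fixed shift — e.g. with offset == 103 as asserted above:
# model_id = sp_id + offset  # for ordinary pieces
# which is why <pad>=0, </s>=1 and the mask tokens all sit below the offset.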
| 672 | 0 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the sum of all natural numbers below ``n`` that are multiples of 3 or 5."""
    a = 3
    result = 0
    while a < n:
        # multiples of 15 already satisfy a % 3 == 0, so one inclusive check suffices
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result


if __name__ == "__main__":
    print(f'''{solution() = }''')
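    # Hedged alternative (not part of the original script): the same sum in
    # closed form via inclusion-exclusion over arithmetic series.
    def solution_closed_form(n: int = 1000) -> int:
        def series_sum(k: int) -> int:
            m = (n - 1) // k  # number of positive multiples of k below n
            return k * m * (m + 1) // 2

        return series_sum(3) + series_sum(5) - series_sum(15)

    assert solution_closed_form() == solution() == 233168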
| 711 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="""gelu""",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values, """labels""": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)
def _lowercase( self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase( self ) -> List[str]:
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def _lowercase( self ) -> Dict:
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def _lowercase( self ) -> Any:
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def _lowercase( self ) -> int:
pass
def _lowercase( self ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
def _lowercase( self ) -> Tuple:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
def _lowercase( self ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(A )
UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Tuple = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> List[str]:
def check_hidden_states_output(A , A , A ):
UpperCAmelCase : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(A , A ) )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(A ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : str = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : int = True
check_hidden_states_output(A , A , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def _lowercase( self ) -> Any:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = ConvNextVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def __lowerCamelCase ( ) -> Optional[int]:
UpperCAmelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""").to(torch_device)
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
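# Hedged usage sketch (mirrors the integration test above; not part of the
# original file): single-image inference with the same checkpoint.
# processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
# model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
# inputs = processor(images=prepare_img(), return_tensors="pt")
# with torch.no_grad():
#     logits = model(**inputs).logits
# print(model.config.id2label[int(logits.argmax(-1))])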
| 672 | 0 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """vinai/phobert-base""": 256,
    """vinai/phobert-large""": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word``."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="""<s>""",
        eos_token="""</s>""",
        sep_token="""</s>""",
        cls_token="""<s>""",
        unk_token="""<unk>""",
        pad_token="""<pad>""",
        mask_token="""<mask>""",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="""utf-8""") as merges_handle:
            merges = merges_handle.read().split("""\n""")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + """</w>"""])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("""inf""")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = """@@ """.join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"""\S+\n?""", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(""" """)))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens) -> str:
        out_string = """ """.join(tokens).replace("""@@ """, """""").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, """r""", encoding="""utf-8""") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(""" """ )
            if idx == -1:
                raise ValueError("""Incorrect dictionary format, expected \'<token> <cnt>\'""" )
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
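# Hedged sanity check (not part of the original file): get_pairs enumerates
# the adjacent symbol pairs that the BPE loop in ``bpe`` merges greedily, e.g.
# get_pairs("low") == {("l", "o"), ("o", "w")}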
| 712 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_CITATION = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""float""" ),
                    """references""": datasets.Value("""float""" ),
                } ),
            reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
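# Hedged cross-check (not part of the metric script): the metric defers to
# scipy, so the docstring example can be reproduced directly.
# from scipy.stats import pearsonr
# r, p = pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
# round(r, 2)  # -0.74
# round(p, 2)  # 0.15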
| 672 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("""T""")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Check every (i, j) range against a brute-force reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
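# Hedged mini example (not part of the original script): the tree answers
# range queries in O(log n) after an O(n) build, e.g.
# st = SegmentTree([5, 3, 8], min)
# st.query(0, 1)                   # 3
# st.update(1, 9); st.query(0, 1)  # 5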
| 713 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two binary strings if they differ in at most one position, else return False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = """_"""
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    """Iteratively merge implicants until only the prime implicants remain."""
    pi = []
    while True:
        check1 = ["""$"""] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    check1[i] = """*"""
                    check1[j] = """*"""
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int , minterms ) -> list[str]:
    temp = []
    for minterm in minterms:
        string = """"""
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp
def is_for_table(string1: str , string2: str , count: int ) -> bool:
    lista = list(string1 )
    listb = list(string2 )
    count_n = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count
def selection(chart , prime_implicants ) -> list[str]:
    temp = []
    select = [0] * len(chart )
    # first pass: columns covered by exactly one implicant mark that implicant as essential
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
                    temp.append(prime_implicants[i] )
    # greedy pass: repeatedly pick the implicant covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants , binary ) -> list[list[int]]:
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count("""_""" )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] , binary[j] , count ):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("""Enter the no. of variables\n""" ) )
    minterms = [
        float(x )
        for x in input(
            """Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
    ]
    binary = decimal_to_binary(no_of_variable , minterms )
    prime_implicants = check(binary )
    print("""Prime Implicants are:""" )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants , binary )
    essential_prime_implicants = selection(chart , prime_implicants )
    print("""Essential Prime Implicants are:""" )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
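# --- Quick illustrative check (assumes the restored function names above): two
# minterms combine in the Quine-McCluskey sense iff they differ in exactly one
# bit, and the differing position becomes '_' in the merged implicant.
assert compare_string("0010", "0110") == "0_10"  # differ only at bit index 1
assert compare_string("0110", "1101") is False   # differ in more than one bit
assert is_for_table("0_10", "0010", 1)           # '0_10' covers minterm 0010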
| 672 | 0 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approch(rows: int , cols: int , mat: list[list[int]] ) -> int:
    def update_area_of_max_square(row: int , col: int ) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row , col + 1 )
        diagonal = update_area_of_max_square(row + 1 , col + 1 )
        down = update_area_of_max_square(row + 1 , col )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0 , 0 )
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approch_with_dp(rows: int , cols: int , mat: list[list[int]] ) -> int:
    def update_area_of_max_square_using_dp_array(
        row: int , col: int , dp_array: list[list[int]] ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array )
        diagonal = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array )
        down = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array )
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int , cols: int , mat: list[list[int]] ) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1 )]
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(dp_array[row][col] , largest_square_area )
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int , cols: int , mat: list[list[int]] ) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(current_row[col] , largest_square_area )
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
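# --- Illustrative cross-check of the recurrence above: dp[row][col] is the side
# of the largest all-ones square whose top-left corner is (row, col), so the
# 2x2 block of ones below yields 2. The sample matrix is made up.
sample = [
    [1, 1, 0],
    [1, 1, 0],
    [0, 0, 1],
]
assert largest_square_area_in_matrix_top_down_approch(3, 3, sample) == 2
assert largest_square_area_in_matrix_bottom_up(3, 3, sample) == 2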
| 714 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def next_number(number: int ) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
        number //= 1_0_0_0_0_0
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 1_0_0_0_0_0_0_0
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[5_7] = False  # the chain starting at 58 ends at 89
def chain(number: int ) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 1_0_0_0_0_0_0_0:
        CHAINS[number - 1] = number_chain
        number *= 1_0
    return number_chain
def solution(number: int = 1_0_0_0_0_0_0_0 ) -> int:
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )  # count chains that end at 89
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
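# --- Sanity sketch (independent of the memoised CHAINS table above): follow the
# digit-square chain for a single number directly until it reaches 1 or 89.
def ends_at_89(n: int) -> bool:
    while n != 1 and n != 89:
        n = sum(int(d) ** 2 for d in str(n))
    return n == 89
assert ends_at_89(44) is False  # 44 -> 32 -> 13 -> 10 -> 1
assert ends_at_89(85) is True   # 85 -> 89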
| 672 | 0 |
from math import ceil, sqrt
def solution(limit: int = 1_0_0_0_0_0_0 ) -> int:
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(F'''{solution() = }''')
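# --- Brute-force cross-check sketch for small limits (assumes the closed-form
# count above): a lamina with outer side o and hole side h (same parity,
# 0 < h <= o - 2) uses o*o - h*h tiles.
def brute_force_laminae(limit: int) -> int:
    count = 0
    o = 3
    while (o * o) - ((o - 2) * (o - 2)) <= limit:  # thinnest lamina still fits
        for h in range(o - 2, 0, -2):              # shrink the hole, keeping parity
            if o * o - h * h > limit:
                break
            count += 1
        o += 1
    return count
assert brute_force_laminae(100) == solution(100)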
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_bert_fast"""] = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_bert"""] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_bert"""] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_bert_tf"""] = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_bert"""] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
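# --- A minimal sketch of the lazy-import pattern used above, with hypothetical
# module names (this is PEP 562 module-level __getattr__, not the transformers
# _LazyModule implementation itself): heavy submodules are only imported when
# an attribute is first requested.
import importlib

_LAZY_ATTRS = {"heavy_helper": "mypackage.heavy_module"}  # attr -> module path (illustrative)

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")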
| 672 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a : Union[str, Any] = logging.get_logger(__name__)
a : Optional[int] = {'''tokenizer_file''': '''tokenizer.json'''}
a : Optional[int] = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class UpperCamelCase_ ( PreTrainedTokenizerFast ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = ['''input_ids''', '''attention_mask''']
lowercase = None
def __init__( self , A=None , A=None , A=None , A="<unk>" , A="<s>" , A="</s>" , A="<pad>" , A=False , A=False , **A , ) -> Optional[Any]:
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
UpperCAmelCase : Tuple = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop("""type""" ) )
UpperCAmelCase : Any = add_prefix_space
UpperCAmelCase : Tuple = pre_tok_class(**__SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = add_prefix_space
def _lowercase( self , *A , **A ) -> str:
UpperCAmelCase : Optional[Any] = kwargs.get("""is_split_into_words""" , __SCREAMING_SNAKE_CASE )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _lowercase( self , *A , **A ) -> int:
UpperCAmelCase : Dict = kwargs.get("""is_split_into_words""" , __SCREAMING_SNAKE_CASE )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
""" pretokenized inputs.""" )
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _lowercase( self , A , A = None ) -> int:
UpperCAmelCase : Union[str, Any] = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def _lowercase( self , A ) -> str:
UpperCAmelCase : str = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) + [self.eos_token_id] )
if len(__SCREAMING_SNAKE_CASE ) > self.model_max_length:
UpperCAmelCase : Tuple = input_ids[-self.model_max_length :]
return input_ids
| 716 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing( search_prob , find_max = True , max_x = math.inf , min_x = -math.inf , max_y = math.inf , min_y = -math.inf , visualization = False , start_temperate = 1_0_0 , rate_of_decrease = 0.01 , threshold_temp = 1 , ) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
        plt.xlabel("""Iterations""" )
        plt.ylabel("""Function values""" )
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_fa(x , y ):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
    )
    print(
        """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
    )
    print(
        """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
    )

    def test_fa(x , y ):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        """The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        F'''{local_min.score()}'''
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        """The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        F'''{local_min.score()}'''
    )
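# --- The acceptance rule above in isolation: a worsening move (change < 0) is
# accepted with probability e^(change / T), so high temperatures accept almost
# anything and low temperatures almost nothing. Numbers below are illustrative.
import math as _math

for temp in (100, 10, 1):
    p = _math.e ** (-5 / temp)  # a move that worsens the score by 5
    print(f"T={temp}: accept with p={p:.3f}")
# T=100: p~0.951, T=10: p~0.607, T=1: p~0.007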
| 672 | 0 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a : Dict = logging.get_logger(__name__)
a : Tuple = {"""vocab_file""": """spiece.model"""}
a : int = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
a : Tuple = {
"""google/bigbird-roberta-base""": 4_0_9_6,
"""google/bigbird-roberta-large""": 4_0_9_6,
"""google/bigbird-base-trivia-itc""": 4_0_9_6,
}
class UpperCamelCase_ ( PreTrainedTokenizer ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
lowercase = []
def __init__( self , A , A="<unk>" , A="<s>" , A="</s>" , A="<pad>" , A="[SEP]" , A="[MASK]" , A="[CLS]" , A = None , **A , ) -> None:
UpperCAmelCase : Tuple = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else bos_token
UpperCAmelCase : List[str] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
UpperCAmelCase : Optional[int] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
UpperCAmelCase : List[str] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
UpperCAmelCase : Tuple = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else cls_token
UpperCAmelCase : Tuple = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : Tuple = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
UpperCAmelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , sep_token=__a , mask_token=__a , cls_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
UpperCAmelCase : Optional[int] = vocab_file
UpperCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__a )
@property
def _lowercase( self ) -> List[str]:
return self.sp_model.get_piece_size()
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Tuple:
UpperCAmelCase : str = self.__dict__.copy()
UpperCAmelCase : Optional[Any] = None
return state
def __setstate__( self , A ) -> Tuple:
UpperCAmelCase : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase : int = {}
UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase( self , A ) -> List[str]:
return self.sp_model.encode(__a , out_type=__a )
def _lowercase( self , A ) -> List[str]:
return self.sp_model.piece_to_id(__a )
def _lowercase( self , A ) -> List[Any]:
UpperCAmelCase : Optional[int] = self.sp_model.IdToPiece(__a )
return token
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : str = []
UpperCAmelCase : Dict = ''
UpperCAmelCase : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__a ) + token
UpperCAmelCase : int = True
UpperCAmelCase : str = []
else:
current_sub_tokens.append(__a )
UpperCAmelCase : Any = False
out_string += self.sp_model.decode(__a )
return out_string.strip()
def _lowercase( self , A , A = False , A = None , A = True , **A , ) -> str:
UpperCAmelCase : Optional[Any] = kwargs.pop("""use_source_tokenizer""" , __a )
UpperCAmelCase : List[Any] = self.convert_ids_to_tokens(__a , skip_special_tokens=__a )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCAmelCase : Tuple = []
UpperCAmelCase : str = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__a ) )
UpperCAmelCase : Optional[int] = []
sub_texts.append(__a )
else:
current_sub_text.append(__a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
UpperCAmelCase : List[str] = re.sub(r""" (\[(MASK|SEP)\])""" , r"""\1""" , """ """.join(__a ) )
else:
UpperCAmelCase : Optional[Any] = ''.join(__a )
UpperCAmelCase : Optional[int] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCAmelCase : Union[str, Any] = self.clean_up_tokenization(__a )
return clean_text
else:
return text
def _lowercase( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(__a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : Any = os.path.join(
__a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , """wb""" ) as fi:
UpperCAmelCase : int = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
def _lowercase( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Any = [self.cls_token_id]
UpperCAmelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def _lowercase( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : Dict = [self.sep_token_id]
UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
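# --- Toy illustration of the layout built by the methods above (made-up ids:
# cls=101, sep=102): a pair of sequences becomes CLS A SEP B SEP, the special-
# tokens mask flags exactly the three added tokens, and token-type ids are 0
# for the first segment (including CLS and its SEP) and 1 for the second.
cls, sep = [101], [102]
token_ids_a = [11, 12]
token_ids_b = [21]
pair = cls + token_ids_a + sep + token_ids_b + sep
special_mask = [1] + [0] * len(token_ids_a) + [1] + [0] * len(token_ids_b) + [1]
type_ids = [0] * len(cls + token_ids_a + sep) + [1] * len(token_ids_b + sep)
assert len(pair) == len(special_mask) == len(type_ids) == 6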
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_nezha"""] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler , num_steps=1_0 ):
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler , num_steps=1_0 ):
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , """schedule.bin""" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
    def assertListAlmostEqual( self , list1 , list2 , tol ) -> None:
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
def _lowercase( self ) -> Union[str, Any]:
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
        for _ in range(100 ):
            loss = criterion(w , target )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def _lowercase( self ) -> List[Any]:
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1000 ):
            loss = criterion(w , target )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None ) -> None:
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
def _lowercase( self ) -> List[Any]:
        common_kwargs = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [1_0.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 8.7_5, 7.5, 6.2_5, 5.0, 3.7_5, 2.5, 1.2_5],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 9.6_1, 8.5_3, 6.9_1, 5.0, 3.0_8, 1.4_6, 0.3_8],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 1_0.0, 8.5_3, 5.0, 1.4_6, 1_0.0, 8.5_3, 5.0, 1.4_6],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1e-7},
[0.0, 5.0, 1_0.0, 7.6_5_6, 5.6_2_5, 3.9_0_6, 2.5, 1.4_0_6, 0.6_2_5, 0.1_5_6],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 1_0.0, 8.1_6_5, 7.0_7_1, 6.3_2_5, 5.7_7_4, 5.3_4_5, 5.0, 4.7_1_4],
),
}
for scheduler_func, data in scheds.items():
            kwargs , expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_a = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_a , expected_learning_rates , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler )  # wrap to test picklability of the schedule
            lrs_b = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_a , lrs_b , msg=f'''failed for {scheduler_func} in save and reload''' )
class LambdaScheduleWrapper:
    def __init__( self , fn ) -> None:
        self.fn = fn
    def __call__( self , *args , **kwargs ):
        return self.fn(*args , **kwargs )
    @classmethod
    def wrap_scheduler( cls , scheduler ) -> None:
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
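# --- The linear warmup/decay schedule exercised above, re-derived as a pure
# function (an illustrative sketch, not the transformers implementation). With
# base lr 10, 2 warmup steps and 10 training steps it reproduces the expected
# list [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25].
def linear_lambda(step, num_warmup_steps=2, num_training_steps=10):
    if step < num_warmup_steps:
        return step / num_warmup_steps  # ramp up linearly
    return max(0.0, (num_training_steps - step) / (num_training_steps - num_warmup_steps))

assert [10.0 * linear_lambda(s) for s in range(10)] == [
    0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25
]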
| 718 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a : Tuple = False
class UpperCamelCase_ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> Optional[Any]:
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        image_prompt = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 672 | 0 |
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class UpperCamelCase_ ( unittest.TestCase ):
lowercase = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        example_video_filepath = hf_hub_download(
            repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
        video_classifier = VideoClassificationPipeline(model=model , image_processor=processor , top_k=2 )
        examples = [
            example_video_filepath,
            """https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
        ]
        return video_classifier, examples
    def run_pipeline_test( self , video_classifier , examples ):
        for example in examples:
            outputs = video_classifier(example )
            self.assertEqual(
                outputs , [
                    {"""score""": ANY(float ), """label""": ANY(str )},
                    {"""score""": ANY(float ), """label""": ANY(str )},
                ] , )
@require_torch
def _lowercase( self ) -> str:
UpperCAmelCase : Optional[Any] = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
UpperCAmelCase : Tuple = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
UpperCAmelCase : Any = pipeline(
"""video-classification""" , model=_lowercase , feature_extractor=_lowercase , frame_sampling_rate=4 )
UpperCAmelCase : Dict = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
UpperCAmelCase : str = video_classifier(_lowercase , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [{"""score""": 0.5_1_9_9, """label""": """LABEL_0"""}, {"""score""": 0.4_8_0_1, """label""": """LABEL_1"""}] , )
UpperCAmelCase : List[Any] = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[{"""score""": 0.5_1_9_9, """label""": """LABEL_0"""}, {"""score""": 0.4_8_0_1, """label""": """LABEL_1"""}],
[{"""score""": 0.5_1_9_9, """label""": """LABEL_0"""}, {"""score""": 0.4_8_0_1, """label""": """LABEL_1"""}],
] , )
@require_tf
def _lowercase( self ) -> Optional[Any]:
pass
| 719 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING: Optional[dict] = None
class UpperCamelCase_ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
    def __init__( self , features=None , device=None , **jnp_array_kwargs ) -> None:
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device , Device ):
            raise ValueError(
                f'''Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` '''
                """is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
                """the device with `str()` to get its string identifier that will be internally mapped """
                """to the actual `jaxlib.xla_extension.Device`.""" )
        self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                f'''Device with string identifier {self.device} not listed among the available '''
                f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
                f'''device: {str(jax.devices()[0] )}.''' )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs
@staticmethod
def _lowercase( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(A ): device for device in jax.devices()}
def _lowercase( self , A ) -> str:
import jax
import jax.numpy as jnp
if isinstance(A , A ) and column:
if all(
isinstance(A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(A , axis=0 )
return column
def _lowercase( self , A ) -> Tuple:
import jax
import jax.numpy as jnp
if isinstance(A , (str, bytes, type(A )) ):
return value
elif isinstance(A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"""dtype""": jnp.int64}
            else:
                default_dtype = {"""dtype""": jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"""dtype""": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(A , PIL.Image.Image ):
UpperCAmelCase : List[str] = np.asarray(A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(A , **{**default_dtype, **self.jnp_array_kwargs} )
def _lowercase( self , A ) -> Tuple:
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(A , """__array__""" ) and not isinstance(A , jax.Array ):
UpperCAmelCase : Optional[int] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(A , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(A ) for substruct in data_struct] )
elif isinstance(A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(A ) for substruct in data_struct] )
return self._tensorize(A )
def _lowercase( self , A ) -> Dict:
return map_nested(self._recursive_tensorize , A , map_list=A )
def _lowercase( self , A ) -> Mapping:
UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(A )
UpperCAmelCase : Dict = self.python_features_decoder.decode_row(A )
return self.recursive_tensorize(A )
def _lowercase( self , A ) -> "jax.Array":
UpperCAmelCase : int = self.numpy_arrow_extractor().extract_column(A )
UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(A , pa_table.column_names[0] )
UpperCAmelCase : Optional[int] = self.recursive_tensorize(A )
UpperCAmelCase : Any = self._consolidate(A )
return column
def _lowercase( self , A ) -> Mapping:
UpperCAmelCase : Optional[int] = self.numpy_arrow_extractor().extract_batch(A )
UpperCAmelCase : List[str] = self.python_features_decoder.decode_batch(A )
UpperCAmelCase : Union[str, Any] = self.recursive_tensorize(A )
for column_name in batch:
UpperCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
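# --- The dtype-defaulting rule above, in isolation (sketch with plain strings;
# the real formatter passes a dtype kwarg to jnp.array): integers map to int64
# only when JAX's x64 mode is enabled, otherwise int32; floats default to float32.
import numpy as np

def default_jnp_dtype(value: np.ndarray, x64_enabled: bool) -> str:
    if np.issubdtype(value.dtype, np.integer):
        return "int64" if x64_enabled else "int32"
    if np.issubdtype(value.dtype, np.floating):
        return "float32"
    return str(value.dtype)

assert default_jnp_dtype(np.array([1, 2]), x64_enabled=False) == "int32"
assert default_jnp_dtype(np.array([1.0]), x64_enabled=True) == "float32"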
| 672 | 0 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
a : Dict = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
a : Optional[Any] = logging.getLogger()
def __lowerCamelCase ( ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
UpperCAmelCase : Tuple = parser.parse_args()
return args.f
def __lowerCamelCase ( _lowercase , _lowercase="eval" ) -> Dict:
UpperCAmelCase : List[Any] = os.path.join(lowercase_ , F'''{split}_results.json''' )
if os.path.exists(lowercase_ ):
with open(lowercase_ , """r""" ) as f:
return json.load(lowercase_ )
raise ValueError(F'''can\'t find {path}''' )
a : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCamelCase_ ( TestCasePlus ):
def _lowercase( self ) -> Any:
UpperCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
UpperCAmelCase : str = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(A__ , """argv""" , A__ ):
run_flax_glue.main()
UpperCAmelCase : Tuple = get_results(A__ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
@slow
def _lowercase( self ) -> str:
UpperCAmelCase : List[str] = self.get_auto_remove_tmp_dir()
UpperCAmelCase : Tuple = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(A__ , """argv""" , A__ ):
run_clm_flax.main()
UpperCAmelCase : int = get_results(A__ )
self.assertLess(result["""eval_perplexity"""] , 100 )
@slow
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
UpperCAmelCase : Union[str, Any] = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(A__ , """argv""" , A__ ):
run_summarization_flax.main()
UpperCAmelCase : List[Any] = get_results(A__ , split="""test""" )
self.assertGreaterEqual(result["""test_rouge1"""] , 10 )
self.assertGreaterEqual(result["""test_rouge2"""] , 2 )
self.assertGreaterEqual(result["""test_rougeL"""] , 7 )
self.assertGreaterEqual(result["""test_rougeLsum"""] , 7 )
@slow
def _lowercase( self ) -> Dict:
UpperCAmelCase : int = self.get_auto_remove_tmp_dir()
UpperCAmelCase : int = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(A__ , """argv""" , A__ ):
run_mlm_flax.main()
UpperCAmelCase : str = get_results(A__ )
self.assertLess(result["""eval_perplexity"""] , 42 )
@slow
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : List[str] = self.get_auto_remove_tmp_dir()
UpperCAmelCase : Union[str, Any] = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(A__ , """argv""" , A__ ):
run_ta_mlm_flax.main()
UpperCAmelCase : str = get_results(A__ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.4_2 )
@slow
def _lowercase( self ) -> Dict:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
UpperCAmelCase : Any = 7 if get_gpu_count() > 1 else 2
UpperCAmelCase : List[Any] = self.get_auto_remove_tmp_dir()
UpperCAmelCase : int = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(A__ , """argv""" , A__ ):
run_flax_ner.main()
UpperCAmelCase : Union[str, Any] = get_results(A__ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
self.assertGreaterEqual(result["""eval_f1"""] , 0.3 )
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCAmelCase : Tuple = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(A__ , """argv""" , A__ ):
run_qa.main()
UpperCAmelCase : Tuple = get_results(A__ )
self.assertGreaterEqual(result["""eval_f1"""] , 30 )
self.assertGreaterEqual(result["""eval_exact"""] , 30 )
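# --- The launcher trick used by every test above, reduced to its core: patching
# sys.argv lets a script's argparse-based main() run in-process. The script and
# flag below are made up for illustration.
import sys
from unittest.mock import patch

def fake_main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_dir")
    args = parser.parse_args()  # reads the patched sys.argv
    return args.output_dir

with patch.object(sys, "argv", ["prog", "--output_dir", "/tmp/run"]):
    assert fake_main() == "/tmp/run"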
| 720 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
a : int = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def main() -> None:
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/transformers""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
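# --- The triage thresholds above as a pure predicate (illustrative only): close
# after a prior bot comment plus more than 7 quiet days, mark stale after more
# than 23 quiet days, and only ever touch issues at least 30 days old.
def triage(days_since_update, days_since_creation, bot_commented_last):
    if bot_commented_last and days_since_update > 7 and days_since_creation >= 30:
        return "close"
    if days_since_update > 23 and days_since_creation >= 30:
        return "stale"
    return "keep"

assert triage(8, 40, True) == "close"
assert triage(24, 40, False) == "stale"
assert triage(2, 40, False) == "keep"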
| 672 | 0 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_validation():
    with parallel_backend("""spark""" ):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(add_one , lst , num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(add_one , lst , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def test_parallel_backend_map_nested_with_multiprocess(num_proc ):
    s1 = [1, 2]
    s2 = {"""a""": 1, """b""": 2}
    s3 = {"""a""": [1, 2], """b""": [3, 4]}
    s4 = {"""a""": {"""1""": 1}, """b""": 2}
    s5 = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"""a""": 2, """b""": 3}
    expected_map_nested_s3 = {"""a""": [2, 3], """b""": [4, 5]}
    expected_map_nested_s4 = {"""a""": {"""1""": 2}, """b""": 3}
    expected_map_nested_s5 = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
    with parallel_backend("""spark""" ):
        assert map_nested(add_one , s1 , num_proc=num_proc ) == expected_map_nested_s1
        assert map_nested(add_one , s2 , num_proc=num_proc ) == expected_map_nested_s2
        assert map_nested(add_one , s3 , num_proc=num_proc ) == expected_map_nested_s3
        assert map_nested(add_one , s4 , num_proc=num_proc ) == expected_map_nested_s4
        assert map_nested(add_one , s5 , num_proc=num_proc ) == expected_map_nested_s5
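# --- What map_nested does, in miniature (simplified sketch; the real datasets
# helper also handles tuples, numpy arrays and multiprocessing):
def mini_map_nested(fn, data):
    if isinstance(data, dict):
        return {k: mini_map_nested(fn, v) for k, v in data.items()}
    if isinstance(data, list):
        return [mini_map_nested(fn, v) for v in data]
    return fn(data)  # leaf value

assert mini_map_nested(lambda x: x + 1, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}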
| 721 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Any:
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : str = use_input_mask
UpperCAmelCase : Optional[int] = use_token_type_ids
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : str = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : str = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Optional[Any] = type_sequence_label_size
UpperCAmelCase : str = initializer_range
UpperCAmelCase : List[Any] = num_labels
UpperCAmelCase : Dict = num_choices
UpperCAmelCase : Tuple = scope
def _lowercase( self ) -> Dict:
UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase( self ) -> Dict:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def _lowercase( self , A , A , A , A , A , A , A ) -> str:
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A )
UpperCAmelCase : Optional[int] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> List[Any]:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
UpperCAmelCase : str = model(
A , attention_mask=A , encoder_hidden_states=A , )
UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Dict = True
UpperCAmelCase : Tuple = True
UpperCAmelCase : str = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
UpperCAmelCase : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
UpperCAmelCase : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )["""hidden_states"""][0]
UpperCAmelCase : Optional[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = config_and_inputs
UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
lowercase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = OpenLlamaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : int = type  # selects the position-embedding variant exercised by the next model check
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[str] = 3
UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""]
UpperCAmelCase : str = input_ids.ne(1 ).to(A )
UpperCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = 3
UpperCAmelCase : Any = """single_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : Optional[Any] = input_ids.ne(1 ).to(A )
UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> int:
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = 3
UpperCAmelCase : Optional[Any] = """multi_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : int = input_ids.ne(1 ).to(A )
UpperCAmelCase : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase : Any = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _lowercase( self ) -> Dict:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowercase( self , A ) -> str:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Any = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
UpperCAmelCase : List[str] = original_model(A ).last_hidden_state
UpperCAmelCase : List[Any] = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Union[str, Any] = {"""type""": scaling_type, """factor""": 1_0.0}
UpperCAmelCase : str = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
UpperCAmelCase : List[str] = scaled_model(A ).last_hidden_state
UpperCAmelCase : Optional[int] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
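# Minimal sketch (hypothetical helper; standard RoPE assumed): "linear" rotary
# scaling, tested above, divides position indices by the factor, stretching the
# usable context window. "dynamic" scaling instead enlarges the rotary base,
# and only once the input exceeds the original maximum length, which is why the
# test expects identical short-input outputs under dynamic scaling.
def _linear_rope_scaling_sketch(factor: float = 2.0):
    import torch
    dim, base = 8, 10000.0
    positions = torch.arange(16).float()
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    return torch.outer(positions / factor, inv_freq)  # factor=2.0 halves every rotation angle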
| 672 | 0 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
a : List[str] = (
"""This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"""
)
def __lowerCamelCase ( _lowercase , _lowercase ) -> Any:
warnings.warn(_lowerCamelCase , _lowerCamelCase )
requires_backends(_lowerCamelCase , """sklearn""" )
return (preds == labels).mean()
def __lowerCamelCase ( _lowercase , _lowercase ) -> Any:
warnings.warn(_lowerCamelCase , _lowerCamelCase )
requires_backends(_lowerCamelCase , """sklearn""" )
UpperCAmelCase : Union[str, Any] = simple_accuracy(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase : Optional[int] = fa_score(y_true=_lowerCamelCase , y_pred=_lowerCamelCase )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
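# Minimal sketch (hypothetical helper; toy arrays) spelling out what the
# accuracy/F1 combination above computes, using sklearn directly.
def _acc_and_fa_demo():
    import numpy as np
    from sklearn.metrics import f1_score
    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    acc = (preds == labels).mean()  # 0.75
    fa = f1_score(y_true=labels, y_pred=preds)  # 0.8
    return {"acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2}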
def __lowerCamelCase ( _lowercase , _lowercase ) -> Tuple:
warnings.warn(_lowerCamelCase , _lowerCamelCase )
requires_backends(_lowerCamelCase , """sklearn""" )
UpperCAmelCase : Any = pearsonr(_lowerCamelCase , _lowerCamelCase )[0]
UpperCAmelCase : Any = spearmanr(_lowerCamelCase , _lowerCamelCase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> str:
warnings.warn(_lowerCamelCase , _lowerCamelCase )
requires_backends(_lowerCamelCase , """sklearn""" )
assert len(_lowerCamelCase ) == len(_lowerCamelCase ), F'''Predictions and labels have mismatched lengths {len(_lowerCamelCase )} and {len(_lowerCamelCase )}'''
if task_name == "cola":
return {"mcc": matthews_corrcoef(_lowerCamelCase , _lowerCamelCase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
elif task_name == "mrpc":
return acc_and_fa(_lowerCamelCase , _lowerCamelCase )
elif task_name == "sts-b":
return pearson_and_spearman(_lowerCamelCase , _lowerCamelCase )
elif task_name == "qqp":
return acc_and_fa(_lowerCamelCase , _lowerCamelCase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
elif task_name == "rte":
return {"acc": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
elif task_name == "hans":
return {"acc": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
else:
raise KeyError(_lowerCamelCase )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Dict:
warnings.warn(_lowerCamelCase , _lowerCamelCase )
requires_backends(_lowerCamelCase , """sklearn""" )
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError(F'''Predictions and labels have mismatched lengths {len(_lowerCamelCase )} and {len(_lowerCamelCase )}''' )
if task_name == "xnli":
return {"acc": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
else:
raise KeyError(_lowerCamelCase )
| 700 |
'''simple docstring'''
import math
def __lowerCamelCase ( _lowercase ) -> bool:
assert isinstance(_lowercase , _lowercase ) and (
number >= 0
), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
UpperCAmelCase : str = range(3 , int(math.sqrt(_lowercase ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def __lowerCamelCase ( _lowercase , _lowercase=1 , **_lowercase ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = factor * value
UpperCAmelCase : List[Any] = value
while not is_prime(_lowercase ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **_lowercase )
return value
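# Minimal standalone sketch (hypothetical names) of the trial-division test
# above: 2 and 3 pass directly, values below 2 and even numbers fail, and odd
# candidates only need checking against odd divisors up to sqrt(number).
def _trial_division_is_prime(number: int) -> bool:
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0:
        return False
    return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))
if __name__ == "__main__":
    assert _trial_division_is_prime(97) and not _trial_division_is_prime(91)  # 91 = 7 * 13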
| 672 | 0 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
a : Optional[int] = """Hello world! cécé herlolip"""
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = FairseqRobertaModel.from_pretrained(_lowercase )
roberta.eval() # disable dropout
UpperCAmelCase : int = roberta.model.encoder.sentence_encoder
UpperCAmelCase : str = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
UpperCAmelCase : Tuple = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" , _lowercase )
UpperCAmelCase : List[str] = XLMRobertaXLForSequenceClassification(_lowercase ) if classification_head else XLMRobertaXLForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCAmelCase : str = roberta_sent_encoder.embed_tokens.weight
UpperCAmelCase : Any = roberta_sent_encoder.embed_positions.weight
UpperCAmelCase : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
UpperCAmelCase : List[str] = roberta_sent_encoder.layer_norm.weight
UpperCAmelCase : Any = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
UpperCAmelCase : str = model.roberta.encoder.layer[i]
UpperCAmelCase : Tuple = roberta_sent_encoder.layers[i]
UpperCAmelCase : Optional[Any] = layer.attention
UpperCAmelCase : Optional[int] = roberta_layer.self_attn_layer_norm.weight
UpperCAmelCase : Optional[int] = roberta_layer.self_attn_layer_norm.bias
# self attention
UpperCAmelCase : int = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
UpperCAmelCase : List[str] = roberta_layer.self_attn.q_proj.weight
UpperCAmelCase : str = roberta_layer.self_attn.q_proj.bias
UpperCAmelCase : Union[str, Any] = roberta_layer.self_attn.k_proj.weight
UpperCAmelCase : Any = roberta_layer.self_attn.k_proj.bias
UpperCAmelCase : str = roberta_layer.self_attn.v_proj.weight
UpperCAmelCase : Union[str, Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
UpperCAmelCase : List[Any] = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
UpperCAmelCase : Any = roberta_layer.self_attn.out_proj.weight
UpperCAmelCase : Optional[Any] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
UpperCAmelCase : int = roberta_layer.final_layer_norm.weight
UpperCAmelCase : List[Any] = roberta_layer.final_layer_norm.bias
# intermediate
UpperCAmelCase : List[Any] = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
UpperCAmelCase : List[Any] = roberta_layer.fca.weight
UpperCAmelCase : List[str] = roberta_layer.fca.bias
# output
UpperCAmelCase : Dict = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
UpperCAmelCase : Dict = roberta_layer.fca.weight
UpperCAmelCase : List[str] = roberta_layer.fca.bias
# end of layer
if classification_head:
UpperCAmelCase : Union[str, Any] = roberta.model.classification_heads["""mnli"""].dense.weight
UpperCAmelCase : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.bias
UpperCAmelCase : Any = roberta.model.classification_heads["""mnli"""].out_proj.weight
UpperCAmelCase : Optional[int] = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
UpperCAmelCase : Union[str, Any] = roberta.model.encoder.lm_head.dense.weight
UpperCAmelCase : Tuple = roberta.model.encoder.lm_head.dense.bias
UpperCAmelCase : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight
UpperCAmelCase : Tuple = roberta.model.encoder.lm_head.layer_norm.bias
UpperCAmelCase : Optional[Any] = roberta.model.encoder.lm_head.weight
UpperCAmelCase : str = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
UpperCAmelCase : List[str] = roberta.encode(_lowercase ).unsqueeze(0 ) # batch of size 1
UpperCAmelCase : Optional[int] = model(_lowercase )[0]
if classification_head:
UpperCAmelCase : int = roberta.model.classification_heads["""mnli"""](roberta.extract_features(_lowercase ) )
else:
UpperCAmelCase : Tuple = roberta.model(_lowercase )[0]
print(our_output.shape , their_output.shape )
UpperCAmelCase : Any = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
UpperCAmelCase : Dict = torch.allclose(_lowercase , _lowercase , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(_lowercase ).mkdir(parents=_lowercase , exist_ok=_lowercase )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
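# Minimal sketch (hypothetical helper, independent of fairseq) of the
# "copy weights, then compare outputs" verification pattern used above.
def _sanity_check_weight_copy():
    src = torch.nn.Linear(4, 4)
    dst = torch.nn.Linear(4, 4)
    dst.weight.data = src.weight.data.clone()
    dst.bias.data = src.bias.data.clone()
    x = torch.randn(1, 4)
    max_absolute_diff = torch.max(torch.abs(dst(x) - src(x))).item()  # ~ 0.0
    assert torch.allclose(dst(x), src(x), atol=1e-3), max_absolute_diff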
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
a : int = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 701 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
UpperCAmelCase : Union[str, Any] = set()
# Replace all the whitespace in our sentence
UpperCAmelCase : List[str] = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(_lowercase ) == 2_6
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
UpperCAmelCase : Tuple = [False] * 2_6
for char in input_str:
if char.islower():
UpperCAmelCase : Any = True
elif char.isupper():
UpperCAmelCase : Union[str, Any] = True
return all(_lowercase )
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def __lowerCamelCase ( ) -> None:
from timeit import timeit
UpperCAmelCase : str = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
print(timeit("""is_pangram()""" , setup=_lowercase ) )
print(timeit("""is_pangram_faster()""" , setup=_lowercase ) )
print(timeit("""is_pangram_fastest()""" , setup=_lowercase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
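# Quick standalone check: the set-comprehension variant above agrees with
# intuition on a pangram and on a non-pangram.
# >>> len({c for c in "The quick brown fox jumps over the lazy dog".lower() if c.isalpha()}) == 26
# True
# >>> len({c for c in "hello world" if c.isalpha()}) == 26
# False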
| 672 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Any = logging.get_logger(__name__)
a : Union[str, Any] = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class UpperCamelCase_ ( _UpperCAmelCase ):
lowercase = 'camembert'
def __init__( self , A=30522 , A=768 , A=12 , A=12 , A=3072 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=2 , A=0.0_2 , A=1e-12 , A=1 , A=0 , A=2 , A="absolute" , A=True , A=None , **A , ) -> List[Any]:
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ )
UpperCAmelCase : Tuple = vocab_size
UpperCAmelCase : str = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Dict = num_attention_heads
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : Optional[int] = intermediate_size
UpperCAmelCase : Optional[Any] = hidden_dropout_prob
UpperCAmelCase : List[str] = attention_probs_dropout_prob
UpperCAmelCase : Optional[Any] = max_position_embeddings
UpperCAmelCase : List[str] = type_vocab_size
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : List[Any] = layer_norm_eps
UpperCAmelCase : List[Any] = position_embedding_type
UpperCAmelCase : Any = use_cache
UpperCAmelCase : Optional[int] = classifier_dropout
class UpperCamelCase_ ( _UpperCAmelCase ):
@property
def _lowercase( self ) -> Tuple:
if self.task == "multiple-choice":
UpperCAmelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase : Any = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
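# Minimal sketch (hypothetical usage): the mapping above tells the ONNX
# exporter which input axes stay dynamic when tracing the model, e.g.
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])
# axis 0 ("batch") and axis 1 ("sequence") then stay variable-length in the graph.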
| 702 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
a : Union[str, Any] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
a : int = """\
Mean Squared Error (MSE) is the average of the squared difference between the predicted
and actual values.
"""
a : int = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def _lowercase( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def _lowercase( self ) -> List[Any]:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def _lowercase( self , A , A , A=None , A="uniform_average" , A=True ) -> List[Any]:
UpperCAmelCase : List[Any] = mean_squared_error(
A , A , sample_weight=A , multioutput=A , squared=A )
return {"mse": mse}
| 672 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
a : Any = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
a : Dict = 2_5_0_0_0_4
a : Dict = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = MBartaaTokenizer
lowercase = MBartaaTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : Dict = MBartaaTokenizer(A , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=A )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase( self ) -> str:
UpperCAmelCase : Tuple = '<s>'
UpperCAmelCase : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(A ) , 1054 )
def _lowercase( self ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Optional[int] = MBartaaTokenizer(A , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=A )
UpperCAmelCase : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
UpperCAmelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def _lowercase( self ) -> Dict:
# fmt: off
UpperCAmelCase : str = {'input_ids': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def _lowercase( self ) -> List[Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase : Any = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : List[str] = tempfile.mkdtemp()
UpperCAmelCase : int = tokenizer_r.save_pretrained(A )
UpperCAmelCase : List[Any] = tokenizer_p.save_pretrained(A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCAmelCase : Tuple = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase : Tuple = tokenizer_r.save_pretrained(A , legacy_format=A )
UpperCAmelCase : Tuple = tokenizer_p.save_pretrained(A )
# Checks it save with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
UpperCAmelCase : Dict = tokenizer_r.from_pretrained(A )
UpperCAmelCase : str = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase : Tuple = tempfile.mkdtemp()
UpperCAmelCase : Any = tokenizer_r.save_pretrained(A , legacy_format=A )
UpperCAmelCase : Any = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : Union[str, Any] = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( unittest.TestCase ):
lowercase = 'facebook/mbart-large-50-one-to-many-mmt'
lowercase = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
lowercase = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
lowercase = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]
@classmethod
def _lowercase( cls ) -> Tuple:
UpperCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
UpperCAmelCase : Any = 1
return cls
def _lowercase( self ) -> List[Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 250038 )
def _lowercase( self ) -> Dict:
UpperCAmelCase : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A )
def _lowercase( self ) -> List[Any]:
self.assertIn(A , self.tokenizer.all_special_ids )
UpperCAmelCase : Optional[int] = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
UpperCAmelCase : str = self.tokenizer.decode(A , skip_special_tokens=A )
UpperCAmelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A )
self.assertEqual(A , A )
self.assertNotIn(self.tokenizer.eos_token , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Union[str, Any] = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , A )
UpperCAmelCase : Any = 10
UpperCAmelCase : Dict = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0]
self.assertEqual(ids[0] , A )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(A ) , A )
def _lowercase( self ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250053, 250001] )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : List[Any] = tempfile.mkdtemp()
UpperCAmelCase : Optional[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A )
UpperCAmelCase : List[str] = MBartaaTokenizer.from_pretrained(A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A )
@require_torch
def _lowercase( self ) -> str:
UpperCAmelCase : Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" )
UpperCAmelCase : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _lowercase( self ) -> Dict:
UpperCAmelCase : List[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
UpperCAmelCase : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(A , A )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCAmelCase : Any = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _lowercase( self ) -> int:
UpperCAmelCase : Dict = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" )
UpperCAmelCase : Any = self.tokenizer(
text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" )
UpperCAmelCase : Tuple = targets['input_ids']
UpperCAmelCase : Optional[Any] = shift_tokens_right(A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _lowercase( self ) -> Any:
UpperCAmelCase : int = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(A ) , {
# en_XX, A, test, EOS
"""input_ids""": [[250004, 62, 3034, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250001,
} , )
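# Minimal sketch (hypothetical helper; assumes pad_token_id == 1, the MBart
# default): the rotation checked by the asserts above, where the final non-pad
# token (EOS, id 2) wraps around to position 0, turning labels
# [ro_RO, ..., EOS] into decoder inputs [EOS, ro_RO, ...].
def _shift_tokens_right_sketch():
    import torch
    labels = torch.tensor([[250020, 884, 9019, 2]])  # [ro_RO, tokens..., </s>]
    last = (labels.ne(1).sum(dim=1) - 1).unsqueeze(-1)  # index of the last non-pad token
    shifted = labels.clone()
    shifted[:, 1:] = labels[:, :-1]
    shifted[:, 0:1] = labels.gather(1, last)
    assert shifted.tolist() == [[2, 250020, 884, 9019]]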
| 703 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Any = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'fnet'
def __init__( self , A=32000 , A=768 , A=12 , A=3072 , A="gelu_new" , A=0.1 , A=512 , A=4 , A=0.0_2 , A=1e-12 , A=False , A=512 , A=3 , A=1 , A=2 , **A , ) -> int:
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Dict = max_position_embeddings
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : int = num_hidden_layers
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : List[Any] = type_vocab_size
UpperCAmelCase : int = layer_norm_eps
UpperCAmelCase : Optional[Any] = use_tpu_fourier_optimizations
UpperCAmelCase : List[Any] = tpu_short_seq_length
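# Minimal sketch (hypothetical usage): the two TPU-specific options above
# switch the Fourier token mixing to a matmul-based DFT for sequences no longer
# than tpu_short_seq_length tokens; off TPU they are typically left disabled.
#   config = FNetConfig(use_tpu_fourier_optimizations=True, tpu_short_seq_length=512)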
| 672 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : str = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCamelCase_ ( __A ):
lowercase = 'fnet'
def __init__( self , A=32000 , A=768 , A=12 , A=3072 , A="gelu_new" , A=0.1 , A=512 , A=4 , A=0.0_2 , A=1e-12 , A=False , A=512 , A=3 , A=1 , A=2 , **A , ) -> int:
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
UpperCAmelCase : Optional[int] = vocab_size
UpperCAmelCase : List[Any] = max_position_embeddings
UpperCAmelCase : Optional[Any] = hidden_size
UpperCAmelCase : List[str] = num_hidden_layers
UpperCAmelCase : int = intermediate_size
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : Dict = hidden_dropout_prob
UpperCAmelCase : str = initializer_range
UpperCAmelCase : str = type_vocab_size
UpperCAmelCase : List[Any] = layer_norm_eps
UpperCAmelCase : Tuple = use_tpu_fourier_optimizations
UpperCAmelCase : Optional[Any] = tpu_short_seq_length
| 704 |
'''simple docstring'''
a : List[Any] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : Dict = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
UpperCAmelCase : Stack[int] = Stack()
UpperCAmelCase : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_lowercase ) )
elif i in operators:
# RULE 2
operator_stack.push(_lowercase )
elif i == ")":
# RULE 4
UpperCAmelCase : List[Any] = operator_stack.peek()
operator_stack.pop()
UpperCAmelCase : str = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase : str = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase : List[Any] = operators[opr](_lowercase , _lowercase )
operand_stack.push(_lowercase )
# RULE 5
return operand_stack.peek()
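# Worked trace for the example below, "(5 + ((4 * 2) * (2 + 3)))":
#   digits push 5, 4, 2; the first ")" pops "*" and computes 4 * 2 = 8
#   digits push 2, 3; the next ")" pops "+" and computes 2 + 3 = 5
#   the next ")" pops "*" and computes 8 * 5 = 40
#   the last ")" pops "+" and computes 5 + 40 = 45, left as the final peek()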
if __name__ == "__main__":
a : Tuple = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 672 | 0 |
'''simple docstring'''
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase_ ( __lowercase , unittest.TestCase ):
lowercase = PhobertTokenizer
lowercase = False
def _lowercase( self ) -> int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase : Optional[int] = ["""T@@""", """i""", """I""", """R@@""", """r""", """e@@"""]
UpperCAmelCase : Dict = dict(zip(_A , range(len(_A ) ) ) )
UpperCAmelCase : List[Any] = ["""#version: 0.2""", """l à</w>"""]
UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_A ) )
def _lowercase( self , **A ) -> Dict:
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **_A )
def _lowercase( self , A ) -> Dict:
UpperCAmelCase : str = """Tôi là VinAI Research"""
UpperCAmelCase : int = """T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"""
return input_text, output_text
def _lowercase( self ) -> str:
UpperCAmelCase : Dict = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase : Dict = """Tôi là VinAI Research"""
UpperCAmelCase : List[str] = """T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h""".split()
UpperCAmelCase : str = tokenizer.tokenize(_A )
print(_A )
self.assertListEqual(_A , _A )
UpperCAmelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
UpperCAmelCase : List[str] = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , _A )
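# Minimal standalone sketch: PhoBERT's BPE marks word-internal splits with a
# trailing "@@", so detokenisation simply drops the "@@ " continuation markers.
# >>> "T@@ ô@@ i l@@ à".replace("@@ ", "")
# 'Tôi là'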
| 705 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a : List[str] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
UpperCAmelCase : List[str] = state_dict.pop(_lowercase )
UpperCAmelCase : List[str] = val
def __lowerCamelCase ( _lowercase ) -> Any:
UpperCAmelCase : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCAmelCase : List[str] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
UpperCAmelCase : Dict = value
else:
UpperCAmelCase : List[Any] = value
return new_state_dict
def __lowerCamelCase ( _lowercase , _lowercase=False ) -> Optional[int]:
UpperCAmelCase : Dict = """"""
if is_panoptic:
UpperCAmelCase : Tuple = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCAmelCase : List[Any] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCAmelCase : List[Any] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : Dict = in_proj_weight[:2_5_6, :]
UpperCAmelCase : Optional[Any] = in_proj_bias[:2_5_6]
UpperCAmelCase : List[Any] = in_proj_weight[2_5_6:5_1_2, :]
UpperCAmelCase : Tuple = in_proj_bias[2_5_6:5_1_2]
UpperCAmelCase : List[str] = in_proj_weight[-2_5_6:, :]
UpperCAmelCase : List[str] = in_proj_bias[-2_5_6:]
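# Minimal sketch (hypothetical shapes): the slicing above splits each fused
# (3 * 256, 256) in_proj matrix into query / key / value blocks of 256 rows,
#   q_w, k_w, v_w = in_proj_weight[:256, :], in_proj_weight[256:512, :], in_proj_weight[-256:, :]
# and each block then populates one of the q/k/v projections in the HF encoder layer.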
def __lowerCamelCase ( ) -> Dict:
UpperCAmelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase : Tuple = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # strip the "conditional_detr" prefix and re-root the key under "conditional_detr.model"
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
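A hypothetical invocation of the script above (the model name is the argparser's default; the script filename and output path are examples, not taken from this file):

# python convert_conditional_detr_checkpoint.py \
#     --model_name conditional_detr_resnet50 \
#     --pytorch_dump_folder_path ./conditional_detr_resnet50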
| 672 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
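The `_LazyModule` registered above defers the heavy torch/TF imports until an attribute is first accessed. A minimal sketch of the same idea using module-level `__getattr__` (PEP 562); this is illustrative, not the actual transformers implementation:

import importlib

_structure = {"processing_blip": ["BlipProcessor"]}  # submodule -> exported names

def __getattr__(name):
    # resolve and import the owning submodule lazily, on first attribute access
    for module_name, exported in _structure.items():
        if name in exported:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")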
| 706 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 672 | 0 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")
def covid_stats(url="https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))
fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
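Because the scraper returns a namedtuple, the three counters are also available by field name. Example use (the values are whatever the live page reports at request time):

# stats = covid_stats()
# print(f"cases={stats.cases}, deaths={stats.deaths}, recovered={stats.recovered}")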
| 707 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
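For reference, `build_inputs_with_special_tokens` above produces the RoBERTa-style layout `<s> A </s>` for a single sequence and `<s> A </s></s> B </s>` for a pair. A sketch with symbolic ids (all values here are made up, not real Barthez vocabulary ids):

cls_id, sep_id = 0, 2          # illustrative special-token ids
a, b = [10, 11], [20]          # illustrative content token ids
single = [cls_id] + a + [sep_id]                       # <s> A </s>
pair = [cls_id] + a + [sep_id, sep_id] + b + [sep_id]  # <s> A </s></s> B </s>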
| 672 | 0 |
'''simple docstring'''
import string
import numpy
def greatest_common_divisor(a, b):
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)
    def __init__(self, encrypt_key):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters(self, letter):
        return self.key_string.index(letter)
    def replace_digits(self, num):
        return self.key_string[round(num)]
    def check_determinant(self):
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)
    def process_text(self, text):
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        # pad with the last character until the length is a multiple of break_key
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)
    def encrypt(self, text):
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key(self):
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))
    def decrypt(self, text):
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch
        return decrypted
def main():
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
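A small worked example of the class above: the 2x2 key [[2, 5], [1, 6]] has determinant 7, which is coprime with 36, so it is a valid key and encryption round-trips (decryption may include trailing padding characters appended by process_text):

import numpy

hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
ciphertext = hc.encrypt("testing hill cipher")
assert hc.decrypt(ciphertext).startswith("TESTINGHILLCIPHER")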
| 708 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def heun(ode_func, ya, xa, step_size, x_end) -> np.array:
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # predictor: one explicit Euler step
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        # corrector: trapezoidal average of the slopes at both ends
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y[k + 1]))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
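Example use of `heun` above: integrating y' = y from x = 0 with y(0) = 1 up to x = 1 should approach e ≈ 2.71828 as the step size shrinks, since the trapezoidal corrector makes the method second-order accurate:

ys = heun(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
print(ys[-1])  # ~2.718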
| 672 | 0 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        image=None,
        batch_size=1,
        num_inference_steps=100,
        eta=0.0,
        generator=None,
        output_type="pil",
        return_dict=True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
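Hypothetical end-to-end use of the pipeline above; the checkpoint id is an assumption, not taken from this file:

# import torch
# from diffusers import LDMSuperResolutionPipeline
# from PIL import Image
#
# pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
# low_res = Image.open("low_res.png").convert("RGB")
# upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
# upscaled.save("upscaled.png")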
| 709 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels
    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _lowercase( self ) -> List[Any]:
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _lowercase( self , A , A=True , A=True , A=TruncationStrategy.ONLY_FIRST , **A ) -> str:
UpperCAmelCase : Tuple = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
UpperCAmelCase : Any = self.tokenizer.eos_token
try:
UpperCAmelCase : Tuple = self.tokenizer(
A , add_special_tokens=A , return_tensors=A , padding=A , truncation=A , )
except Exception as e:
if "too short" in str(A ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
UpperCAmelCase : List[str] = self.tokenizer(
A , add_special_tokens=A , return_tensors=A , padding=A , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _lowercase( self , **A ) -> Tuple:
if kwargs.get("""multi_class""" , A ) is not None:
UpperCAmelCase : Any = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
UpperCAmelCase : int = {}
if "candidate_labels" in kwargs:
UpperCAmelCase : Tuple = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
UpperCAmelCase : List[Any] = kwargs["""hypothesis_template"""]
UpperCAmelCase : Dict = {}
if "multi_label" in kwargs:
UpperCAmelCase : Union[str, Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self , A , *A , **A , ) -> Tuple:
if len(A ) == 0:
pass
elif len(A ) == 1 and "candidate_labels" not in kwargs:
UpperCAmelCase : Optional[Any] = args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(A , **A )
def _lowercase( self , A , A=None , A="This example is {}." ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : List[Any] = self._args_parser(A , A , A )
for i, (candidate_label, sequence_pair) in enumerate(zip(A , A ) ):
UpperCAmelCase : Any = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(A ) - 1,
**model_input,
}
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = inputs["""candidate_label"""]
UpperCAmelCase : Tuple = inputs["""sequence"""]
UpperCAmelCase : List[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
UpperCAmelCase : Tuple = self.model(**A )
UpperCAmelCase : Optional[int] = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _lowercase( self , A , A=False ) -> List[str]:
UpperCAmelCase : Dict = [outputs["""candidate_label"""] for outputs in model_outputs]
UpperCAmelCase : List[Any] = [outputs["""sequence"""] for outputs in model_outputs]
UpperCAmelCase : List[Any] = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
UpperCAmelCase : Optional[Any] = logits.shape[0]
UpperCAmelCase : int = len(A )
UpperCAmelCase : List[Any] = N // n
UpperCAmelCase : int = logits.reshape((num_sequences, n, -1) )
if multi_label or len(A ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
UpperCAmelCase : str = self.entailment_id
UpperCAmelCase : str = -1 if entailment_id == 0 else 0
UpperCAmelCase : Optional[Any] = reshaped_outputs[..., [contradiction_id, entailment_id]]
UpperCAmelCase : int = np.exp(A ) / np.exp(A ).sum(-1 , keepdims=A )
UpperCAmelCase : int = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
UpperCAmelCase : Dict = reshaped_outputs[..., self.entailment_id]
UpperCAmelCase : Optional[int] = np.exp(A ) / np.exp(A ).sum(-1 , keepdims=A )
UpperCAmelCase : int = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
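In practice the pipeline above is reached through the high-level factory. A hedged usage sketch (the NLI checkpoint is the usual choice for zero-shot classification, not something mandated by this file):

# from transformers import pipeline
#
# classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
# classifier("one day I will see the world", candidate_labels=["travel", "cooking", "dancing"])
# # pass multi_label=True to score each label independently via the
# # entailment-vs-contradiction softmax branch implemented in postprocess above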
| 672 | 0 |
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))
    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
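The two tests above encode the intended calling pattern; spelled out as plain usage (same tiny test checkpoint):

# model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
# model = model.to_bettertransformer()        # swap attention to fused kernels
# ...                                          # run inference
# model = model.reverse_bettertransformer()   # required before save_pretrained()
# model.save_pretrained("out_dir")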
| 710 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> List[str]:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = """</s>"""
UpperCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(A ) , 1103 )
def _lowercase( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _lowercase( self ) -> int:
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Union[str, Any] = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
UpperCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : List[Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
UpperCAmelCase : Any = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
UpperCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> int:
UpperCAmelCase : str = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
UpperCAmelCase : List[Any] = """To ensure a smooth flow of bank resolutions."""
UpperCAmelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _lowercase( self ) -> Any:
UpperCAmelCase : int = ["""This is going to be way too long.""" * 150, """short example"""]
UpperCAmelCase : Optional[int] = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def _lowercase( self ) -> List[str]:
# fmt: off
UpperCAmelCase : List[str] = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> str:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Any = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : str = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
UpperCAmelCase : List[str] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : str = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = ["""This is going to be way too long.""" * 1000, """short example"""]
UpperCAmelCase : Any = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : int = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
UpperCAmelCase : Optional[Any] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
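For context on the ids asserted in the tests above: Pegasus reserves id 2 for the sentence-level mask `<mask_1>` and id 3 for the word-level mask `<mask_2>`, and shifts regular SentencePiece ids by `offset` (103 for google/pegasus-large), which is why `unk_token_id == offset + 2 == 105`. A quick check:

# tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
# tok.convert_ids_to_tokens([0, 1, 2, 3])  # ['<pad>', '</s>', '<mask_1>', '<mask_2>']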
| 672 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }
    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
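A minimal sketch instantiating the config above with non-default sizes (the values are illustrative, not tied to any released checkpoint); note how `attribute_map` lets the generic names resolve to the decoder-specific ones:

config = TrOCRConfig(d_model=256, decoder_layers=6, decoder_attention_heads=8)
assert config.hidden_size == 256        # resolved through attribute_map
assert config.num_hidden_layers == 6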
| 711 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
def __init__( self , A , A=13 , A=32 , A=3 , A=4 , A=[10, 20, 30, 40] , A=[2, 2, 3, 2] , A=True , A=True , A=37 , A="gelu" , A=10 , A=0.0_2 , A=["stage2", "stage3", "stage4"] , A=[2, 3, 4] , A=None , ) -> int:
UpperCAmelCase : str = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Dict = image_size
UpperCAmelCase : Tuple = num_channels
UpperCAmelCase : Union[str, Any] = num_stages
UpperCAmelCase : Any = hidden_sizes
UpperCAmelCase : str = depths
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : List[str] = num_labels
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Optional[Any] = out_features
UpperCAmelCase : List[str] = out_indices
UpperCAmelCase : Any = scope
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def _lowercase( self ) -> Optional[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=A , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowercase( self , A , A , A ) -> Optional[Any]:
UpperCAmelCase : int = ConvNextVaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : List[str] = ConvNextVaForImageClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : Optional[Any] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Any = model(A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase : Any = None
UpperCAmelCase : Optional[int] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : List[str] = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowercase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Dict = ConvNextVaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def _lowercase( self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase( self ) -> List[str]:
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def _lowercase( self ) -> Dict:
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def _lowercase( self ) -> Any:
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def _lowercase( self ) -> int:
pass
def _lowercase( self ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : Optional[int] = True
if model_class.__name__ in [
*get_values(A ),
*get_values(A ),
]:
continue
UpperCAmelCase : Any = model_class(A )
model.to(A )
model.train()
UpperCAmelCase : List[str] = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : List[str] = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : List[str] = False
UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(A ), *get_values(A )]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase : Dict = model_class(A )
model.to(A )
model.gradient_checkpointing_enable()
model.train()
UpperCAmelCase : Any = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : Any = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(A )
UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Tuple = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> List[str]:
def check_hidden_states_output(A , A , A ):
UpperCAmelCase : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(A , A ) )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(A ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : str = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : int = True
check_hidden_states_output(A , A , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def _lowercase( self ) -> Any:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = ConvNextVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
@cached_property
def _lowercase( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Any = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(A )
UpperCAmelCase : List[Any] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : Tuple = preprocessor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**A )
# verify the logits
UpperCAmelCase : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A )
UpperCAmelCase : Dict = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
| 672 | 0 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")
    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
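Sketch of wiring the pieces above into a Lightning trainer (the output directory and patience are example values):

# import pytorch_lightning as pl
#
# trainer = pl.Trainer(
#     callbacks=[
#         Seq2SeqLoggingCallback(),
#         get_checkpoint_callback("output_dir", metric="rouge2"),
#         get_early_stopping_callback(metric="rouge2", patience=3),
#     ]
# )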
| 712 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_CITATION = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
    def _lowercase( self , A , A , return_pvalue=False ) -> dict:
if return_pvalue:
UpperCAmelCase : int = pearsonr(A , A )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(A , A )[0] )}
| 672 | 0 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
a : Union[str, Any] = logging.getLogger(__name__)
a : str = """Hello world! cécé herlolip"""
a : Any = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def __lowerCamelCase ( _lowercase , _lowercase ) -> int:
UpperCAmelCase : Tuple = BertAbsConfig(
temp_dir=""".""" , finetune_bert=lowerCamelCase_ , large=lowerCamelCase_ , share_emb=lowerCamelCase_ , use_bert_emb=lowerCamelCase_ , encoder="""bert""" , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
    UpperCAmelCase : int = torch.load(lowerCamelCase_ , lambda storage , loc : storage )
UpperCAmelCase : List[str] = AbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) , lowerCamelCase_ )
original.eval()
UpperCAmelCase : Optional[int] = BertAbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
UpperCAmelCase : int = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
UpperCAmelCase : Optional[Any] = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(lowerCamelCase_ )) )
UpperCAmelCase : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
UpperCAmelCase : Optional[int] = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(lowerCamelCase_ )) )
UpperCAmelCase : Optional[Any] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
UpperCAmelCase : Optional[int] = encoder_input_ids
UpperCAmelCase : Optional[Any] = decoder_input_ids
UpperCAmelCase : List[str] = None
UpperCAmelCase : Tuple = None
UpperCAmelCase : int = None
UpperCAmelCase : List[Any] = None
UpperCAmelCase : Optional[int] = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
UpperCAmelCase : str = original(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
UpperCAmelCase : Optional[Any] = original.generator(lowerCamelCase_ )
UpperCAmelCase : List[Any] = new_model(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
UpperCAmelCase : str = new_model.generator(lowerCamelCase_ )
UpperCAmelCase : int = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(lowerCamelCase_ ) )
UpperCAmelCase : Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(lowerCamelCase_ ) )
UpperCAmelCase : Any = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
a : Any = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
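# Example invocation (script name and paths are hypothetical, for illustration only):
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path ./bertabs_cnndm_final.pt \
#       --pytorch_dump_folder_path ./bertabs-finetuned-cnndm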
| 713 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __lowerCamelCase ( _lowercase , _lowercase ) -> str | Literal[False]:
UpperCAmelCase : Optional[int] = list(_lowercase )
UpperCAmelCase : Dict = list(_lowercase )
UpperCAmelCase : str = 0
for i in range(len(_lowercase ) ):
if lista[i] != lista[i]:
count += 1
UpperCAmelCase : Optional[Any] = """_"""
if count > 1:
return False
else:
return "".join(_lowercase )
def __lowerCamelCase ( _lowercase ) -> list[str]:
UpperCAmelCase : List[str] = []
while True:
UpperCAmelCase : Optional[int] = ["""$"""] * len(_lowercase )
UpperCAmelCase : int = []
for i in range(len(_lowercase ) ):
for j in range(i + 1 , len(_lowercase ) ):
UpperCAmelCase : str = compare_string(binary[i] , binary[j] )
if k is False:
UpperCAmelCase : Union[str, Any] = """*"""
UpperCAmelCase : Optional[Any] = """*"""
temp.append("""X""" )
for i in range(len(_lowercase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_lowercase ) == 0:
return pi
UpperCAmelCase : List[Any] = list(set(_lowercase ) )
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[str]:
UpperCAmelCase : Dict = []
for minterm in minterms:
UpperCAmelCase : List[str] = """"""
for _ in range(_lowercase ):
UpperCAmelCase : Dict = str(minterm % 2 ) + string
minterm //= 2
temp.append(_lowercase )
return temp
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> bool:
UpperCAmelCase : Optional[int] = list(_lowercase )
UpperCAmelCase : Dict = list(_lowercase )
UpperCAmelCase : Dict = 0
for i in range(len(_lowercase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[str]:
UpperCAmelCase : Tuple = []
UpperCAmelCase : Optional[int] = [0] * len(_lowercase )
for i in range(len(chart[0] ) ):
UpperCAmelCase : Any = 0
UpperCAmelCase : Optional[Any] = -1
for j in range(len(_lowercase ) ):
if chart[j][i] == 1:
count += 1
UpperCAmelCase : str = j
if count == 1:
UpperCAmelCase : Optional[int] = 1
for i in range(len(_lowercase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_lowercase ) ):
UpperCAmelCase : List[str] = 0
temp.append(prime_implicants[i] )
while True:
UpperCAmelCase : int = 0
UpperCAmelCase : Tuple = -1
UpperCAmelCase : Union[str, Any] = 0
for i in range(len(_lowercase ) ):
UpperCAmelCase : Optional[Any] = chart[i].count(1 )
if count_n > max_n:
UpperCAmelCase : Union[str, Any] = count_n
UpperCAmelCase : Optional[Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_lowercase ) ):
UpperCAmelCase : Optional[Any] = 0
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[list[int]]:
UpperCAmelCase : Optional[int] = [[0 for x in range(len(_lowercase ) )] for x in range(len(_lowercase ) )]
for i in range(len(_lowercase ) ):
UpperCAmelCase : Tuple = prime_implicants[i].count("""_""" )
for j in range(len(_lowercase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _lowercase ):
UpperCAmelCase : List[Any] = 1
return chart
def __lowerCamelCase ( ) -> None:
UpperCAmelCase : str = int(input("""Enter the no. of variables\n""" ) )
UpperCAmelCase : List[Any] = [
        int(x)
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
UpperCAmelCase : str = decimal_to_binary(_lowercase , _lowercase )
UpperCAmelCase : Tuple = check(_lowercase )
print("""Prime Implicants are:""" )
print(_lowercase )
UpperCAmelCase : Union[str, Any] = prime_implicant_chart(_lowercase , _lowercase )
UpperCAmelCase : Tuple = selection(_lowercase , _lowercase )
print("""Essential Prime Implicants are:""" )
print(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
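# A minimal, self-contained sketch (independent of the code above) of the single
# merge step that compare_string/check are meant to perform in Quine-McCluskey:
# two implicants merge when they differ in exactly one bit position.
def merge_terms_sketch(a: str, b: str) -> str | None:
    diff = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diff) != 1:
        return None  # zero or two-plus differing bits: the pair cannot merge
    return a[: diff[0]] + "_" + a[diff[0] + 1 :]


assert merge_terms_sketch("001", "101") == "_01"
assert merge_terms_sketch("001", "111") is None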
| 672 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class UpperCamelCase_ :
lowercase = BlenderbotConfig
lowercase = {}
lowercase = 'gelu'
def __init__( self , A , A=13 , A=7 , A=True , A=False , A=99 , A=32 , A=2 , A=4 , A=37 , A=0.1 , A=0.1 , A=20 , A=2 , A=1 , A=0 , ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Optional[int] = seq_length
UpperCAmelCase : str = is_training
UpperCAmelCase : Tuple = use_labels
UpperCAmelCase : Optional[int] = vocab_size
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : List[str] = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Tuple = eos_token_id
UpperCAmelCase : Any = pad_token_id
UpperCAmelCase : Dict = bos_token_id
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : List[str] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase : int = prepare_blenderbot_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def _lowercase( self , A , A ) -> Dict:
UpperCAmelCase : List[Any] = TFBlenderbotModel(config=UpperCamelCase_ ).get_decoder()
UpperCAmelCase : List[Any] = inputs_dict["""input_ids"""]
UpperCAmelCase : Optional[Any] = input_ids[:1, :]
UpperCAmelCase : Any = inputs_dict["""attention_mask"""][:1, :]
UpperCAmelCase : Dict = inputs_dict["""head_mask"""]
UpperCAmelCase : str = 1
# first forward pass
UpperCAmelCase : Union[str, Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ )
UpperCAmelCase , UpperCAmelCase : List[Any] = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
UpperCAmelCase : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append the new tokens to input_ids and attention_mask
UpperCAmelCase : int = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase : Union[str, Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0]
UpperCAmelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1e-3 )
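        # The check above relies on a cache-equivalence property: feeding the full
        # sequence at once must yield, for the trailing positions, the same logits as
        # feeding only the new tokens plus the cached past_key_values, up to small
        # numerical noise (hence the rtol=1e-3 tolerance).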
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Optional[int]:
if attention_mask is None:
UpperCAmelCase : Optional[int] = tf.cast(tf.math.not_equal(A__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase : Union[str, Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCamelCase_ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
lowercase = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
lowercase = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
lowercase = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase = True
lowercase = False
lowercase = False
def _lowercase( self ) -> Any:
UpperCAmelCase : Dict = TFBlenderbotModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase_ )
def _lowercase( self ) -> List[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase_ )
@require_tokenizers
@require_tf
class UpperCamelCase_ ( unittest.TestCase ):
lowercase = ['My friends are cool but they eat too many carbs.']
lowercase = 'facebook/blenderbot-400M-distill'
@cached_property
def _lowercase( self ) -> Optional[int]:
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def _lowercase( self ) -> str:
UpperCAmelCase : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def _lowercase( self ) -> Any:
UpperCAmelCase : Tuple = self.tokenizer(self.src_text , return_tensors="""tf""" )
UpperCAmelCase : Optional[Any] = self.model.generate(
model_inputs.input_ids , )
UpperCAmelCase : Tuple = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase_ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 714 |
'''simple docstring'''
a : Tuple = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : str = 0
while number:
        # Speed increased slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
number //= 1_0_0_0_0_0
return sum_of_digits_squared
# Two chains are formed:
# one ends with 89, and declaring its member 58 first gives the least number of
# iterations needed to check all the members;
# the other ends with 1 and contains only the single element 1.
# So 58 and 1 are chosen to be declared at the start.
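# Worked example of one chain (hand arithmetic): 44 -> 4^2 + 4^2 = 32,
# 32 -> 9 + 4 = 13, 13 -> 1 + 9 = 10, 10 -> 1 + 0 = 1, and 1 maps to itself,
# so 44 belongs to the chain ending in 1; by contrast, 85 -> 64 + 25 = 89.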
# Changed dictionary to an array to quicken the solution
a : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
a : Optional[Any] = True
a : List[Any] = False
def __lowerCamelCase ( _lowercase ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
UpperCAmelCase : List[str] = chain(next_number(_lowercase ) )
UpperCAmelCase : Tuple = number_chain
while number < 1_0_0_0_0_0_0_0:
UpperCAmelCase : List[str] = number_chain
number *= 1_0
return number_chain
def __lowerCamelCase ( _lowercase = 1_0_0_0_0_0_0_0 ) -> int:
for i in range(1 , _lowercase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
| 672 | 0 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase_ ( _UpperCamelCase ):
lowercase = (UnCLIPScheduler,)
def _lowercase( self , **A ) -> int:
UpperCAmelCase : Dict = {
"num_train_timesteps": 1000,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
config.update(**A )
return config
def _lowercase( self ) -> Optional[Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=A )
def _lowercase( self ) -> List[str]:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=A )
def _lowercase( self ) -> Dict:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A )
def _lowercase( self ) -> str:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=A )
def _lowercase( self ) -> Union[str, Any]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=A )
def _lowercase( self ) -> Optional[int]:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=A , prev_timestep=A )
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase : List[Any] = self.get_scheduler_config(variance_type="""fixed_small_log""" )
UpperCAmelCase : Optional[Any] = scheduler_class(**A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_5_4_9_6_2_5 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_9_9_4_9_8_7 ) ) < 1e-5
def _lowercase( self ) -> List[str]:
UpperCAmelCase : List[str] = self.scheduler_classes[0]
UpperCAmelCase : str = self.get_scheduler_config(variance_type="""learned_range""" )
UpperCAmelCase : Dict = scheduler_class(**A )
UpperCAmelCase : Tuple = 0.5
assert scheduler._get_variance(1 , predicted_variance=A ) - -1_0.1_7_1_2_7_9_0 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=A ) - -5.7_9_9_8_0_5_2 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=A ) - -0.0_0_1_0_0_1_1 < 1e-5
def _lowercase( self ) -> Any:
UpperCAmelCase : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase : Dict = self.get_scheduler_config()
UpperCAmelCase : Dict = scheduler_class(**A )
UpperCAmelCase : List[str] = scheduler.timesteps
UpperCAmelCase : Optional[Any] = self.dummy_model()
UpperCAmelCase : int = self.dummy_sample_deter
UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
for i, t in enumerate(A ):
# 1. predict noise residual
UpperCAmelCase : List[str] = model(A , A )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase : Any = scheduler.step(A , A , A , generator=A ).prev_sample
UpperCAmelCase : Tuple = pred_prev_sample
UpperCAmelCase : Optional[int] = torch.sum(torch.abs(A ) )
UpperCAmelCase : Union[str, Any] = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5 ) < 1e-2
assert abs(result_mean.item() - 0.3_2_8_4_7_4_3 ) < 1e-3
def _lowercase( self ) -> Dict:
UpperCAmelCase : Any = self.scheduler_classes[0]
UpperCAmelCase : Tuple = self.get_scheduler_config()
UpperCAmelCase : Dict = scheduler_class(**A )
scheduler.set_timesteps(25 )
UpperCAmelCase : int = scheduler.timesteps
UpperCAmelCase : List[Any] = self.dummy_model()
UpperCAmelCase : Optional[Any] = self.dummy_sample_deter
UpperCAmelCase : str = torch.manual_seed(0 )
for i, t in enumerate(A ):
# 1. predict noise residual
UpperCAmelCase : List[str] = model(A , A )
if i + 1 == timesteps.shape[0]:
UpperCAmelCase : Union[str, Any] = None
else:
UpperCAmelCase : Tuple = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
UpperCAmelCase : int = scheduler.step(
A , A , A , prev_timestep=A , generator=A ).prev_sample
UpperCAmelCase : Dict = pred_prev_sample
UpperCAmelCase : List[Any] = torch.sum(torch.abs(A ) )
UpperCAmelCase : str = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_6_2_0_3_8 ) < 1e-3
def _lowercase( self ) -> List[Any]:
pass
def _lowercase( self ) -> Optional[Any]:
pass
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : Optional[Any] = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
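# With this lazy structure, `from transformers.models.bert import BertModel` defers
# the heavy framework imports until the attribute is first accessed, while static
# type checkers see the explicit imports in the TYPE_CHECKING branch above.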
| 672 | 0 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
UpperCAmelCase : Any = BigBirdConfig.from_json_file(a__ )
print(F'''Building PyTorch model from configuration: {config}''' )
if is_trivia_qa:
UpperCAmelCase : Union[str, Any] = BigBirdForQuestionAnswering(a__ )
else:
UpperCAmelCase : Any = BigBirdForPreTraining(a__ )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(a__ , a__ , is_trivia_qa=a__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(a__ )
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
a : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
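# Illustrative command line (hypothetical paths); pass --is_trivia_qa when the
# checkpoint carries a TriviaQA question-answering head:
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bigbird_model.ckpt \
#       --big_bird_config_file ./config.json \
#       --pytorch_dump_path ./bigbird-converted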
| 716 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def __lowerCamelCase ( _lowercase , _lowercase = True , _lowercase = math.inf , _lowercase = -math.inf , _lowercase = math.inf , _lowercase = -math.inf , _lowercase = False , _lowercase = 1_0_0 , _lowercase = 0.01 , _lowercase = 1 , ) -> Any:
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : Any = search_prob
UpperCAmelCase : Any = start_temperate
UpperCAmelCase : Optional[Any] = []
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : Optional[Any] = None
while not search_end:
UpperCAmelCase : List[str] = current_state.score()
if best_state is None or current_score > best_state.score():
UpperCAmelCase : List[Any] = current_state
scores.append(_lowercase )
iterations += 1
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
        ): # until we find a neighbor we can move to, or run out of neighbors
UpperCAmelCase : int = random.randint(0 , len(_lowercase ) - 1 ) # picking a random neighbor
UpperCAmelCase : int = neighbors.pop(_lowercase )
UpperCAmelCase : Tuple = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
UpperCAmelCase : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
UpperCAmelCase : int = picked_neighbor
else:
UpperCAmelCase : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
UpperCAmelCase : Optional[int] = picked_neighbor
UpperCAmelCase : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
UpperCAmelCase : Optional[int] = True
else:
UpperCAmelCase : Optional[int] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(_lowercase ) , _lowercase )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
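# Worked acceptance-probability example (hand arithmetic): at current_temp = 100,
# a worsening move with change = -25 is kept with p = e^(-25 / 100) ~= 0.78, so bad
# moves are usually accepted early on; at current_temp = 5 the same move gives
# e^(-5) ~= 0.0067 and is almost always rejected, which is how the search freezes.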
if __name__ == "__main__":
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a : Dict = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
    F'''and 50 > y > -5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
a : List[str] = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
    F'''and 50 > y > -5 found via simulated annealing: {local_min.score()}'''
)
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]:
return (3 * x**2) - (6 * y)
a : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a : Any = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
a : List[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
| 672 | 0 |
'''simple docstring'''
def __lowerCamelCase ( ) -> List[str]:
for n in range(1 , 1_0_0_0_0_0_0 ):
yield n * (n + 1) // 2
def __lowerCamelCase ( _lowercase ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = 1
UpperCAmelCase : List[str] = 2
while i * i <= n:
UpperCAmelCase : Union[str, Any] = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
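# Worked example of the multiplicative divisor-count formula implemented above:
# 28 = 2^2 * 7^1 has (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28), and it is
# the first triangle number with more than 5 divisors.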
def __lowerCamelCase ( ) -> Tuple:
    return next(i for i in triangle_number_generator() if count_divisors(i) > 5_0_0 )
if __name__ == "__main__":
print(solution())
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a : Any = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 0 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> np.ndarray:
UpperCAmelCase : Optional[int] = int(np.ceil((x_end - xa) / step_size ) )
UpperCAmelCase : Union[str, Any] = np.zeros((n + 1,) )
UpperCAmelCase : Any = ya
UpperCAmelCase : Tuple = xa
for k in range(_lowercase ):
UpperCAmelCase : Tuple = y[k] + step_size * ode_func(_lowercase , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a : Tuple = False
class UpperCamelCase_ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Any = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCAmelCase : List[Any] = torch.manual_seed(0 )
UpperCAmelCase : List[str] = pipe(
image=A , generator=A , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
UpperCAmelCase : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : List[str] = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 672 | 0 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
a : List[str] = parser.parse_args()
    a : List[str] = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
a : Optional[Any] = CLIPImageProcessor()
a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
a : Tuple = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
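# The resulting image-variation pipeline reuses every component of the Karlo
# text-to-image pipeline and adds a CLIP image encoder plus its processor, so that
# an input image (rather than a text prompt) conditions the decoder.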
| 719 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a : Any = get_logger()
a : Optional[dict] = None
class UpperCamelCase_ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self , A=None , A=None , **A ) -> str:
super().__init__(features=A )
import jax
from jaxlib.xla_client import Device
if isinstance(A , A ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(A )}, as `jaxlib.xla_extension.Device` '''
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
UpperCAmelCase : Optional[int] = device if isinstance(A , A ) else str(jax.devices()[0] )
        # use a global variable since `jaxlib.xla_extension.Device` is not
        # serializable with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
UpperCAmelCase : List[Any] = str(jax.devices()[0] )
UpperCAmelCase : Union[str, Any] = jnp_array_kwargs
@staticmethod
def _lowercase( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(A ): device for device in jax.devices()}
def _lowercase( self , A ) -> str:
import jax
import jax.numpy as jnp
if isinstance(A , A ) and column:
if all(
isinstance(A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(A , axis=0 )
return column
def _lowercase( self , A ) -> Tuple:
import jax
import jax.numpy as jnp
if isinstance(A , (str, bytes, type(A )) ):
return value
elif isinstance(A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
UpperCAmelCase : List[str] = {}
if isinstance(A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
UpperCAmelCase : str = {"""dtype""": jnp.intaa}
else:
UpperCAmelCase : int = {"""dtype""": jnp.intaa}
elif isinstance(A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
UpperCAmelCase : Any = {"""dtype""": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(A , PIL.Image.Image ):
UpperCAmelCase : List[str] = np.asarray(A )
        # use a global variable since `jaxlib.xla_extension.Device` is not
        # serializable with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCAmelCase : Dict = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(A , **{**default_dtype, **self.jnp_array_kwargs} )
def _lowercase( self , A ) -> Tuple:
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(A , """__array__""" ) and not isinstance(A , jax.Array ):
UpperCAmelCase : Optional[int] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(A , np.ndarray ):
            if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(A ) for substruct in data_struct] )
elif isinstance(A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(A ) for substruct in data_struct] )
return self._tensorize(A )
def _lowercase( self , A ) -> Dict:
return map_nested(self._recursive_tensorize , A , map_list=A )
def _lowercase( self , A ) -> Mapping:
UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(A )
UpperCAmelCase : Dict = self.python_features_decoder.decode_row(A )
return self.recursive_tensorize(A )
def _lowercase( self , A ) -> "jax.Array":
UpperCAmelCase : int = self.numpy_arrow_extractor().extract_column(A )
UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(A , pa_table.column_names[0] )
UpperCAmelCase : Optional[int] = self.recursive_tensorize(A )
UpperCAmelCase : Any = self._consolidate(A )
return column
def _lowercase( self , A ) -> Mapping:
UpperCAmelCase : Optional[int] = self.numpy_arrow_extractor().extract_batch(A )
UpperCAmelCase : List[str] = self.python_features_decoder.decode_batch(A )
UpperCAmelCase : Union[str, Any] = self.recursive_tensorize(A )
for column_name in batch:
UpperCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
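# A minimal usage sketch (assuming the surrounding `datasets` API) of the formatter
# above; with it selected, rows, columns and batches come back as jax.Array values
# placed on the requested device:
#   ds = load_dataset("some/dataset", split="train")
#   ds = ds.with_format("jax", device=str(jax.devices()[0]))
#   ds[0]  # a dict of jax.Array values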
| 672 | 0 |
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
a : Tuple = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]:
if got_ver is None or want_ver is None:
raise ValueError(
F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
F''' reinstalling {pkg}.''' )
if not ops[op](version.parse(__a ) , version.parse(__a ) ):
raise ImportError(
F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def __lowerCamelCase ( _lowercase , _lowercase = None ) -> List[str]:
UpperCAmelCase : List[str] = F'''\n{hint}''' if hint is not None else """"""
# non-versioned check
if re.match(R"""^[\w_\-\d]+$""" , __a ):
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = requirement, None, None
else:
UpperCAmelCase : Dict = re.findall(R"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""" , __a )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"""
F''' got {requirement}''' )
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = match[0]
UpperCAmelCase : str = want_full.split(""",""" ) # there could be multiple requirements
UpperCAmelCase : Any = {}
for w in want_range:
UpperCAmelCase : Optional[Any] = re.findall(R"""^([\s!=<>]{1,2})(.+)""" , __a )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"""
F''' but got {requirement}''' )
UpperCAmelCase , UpperCAmelCase : List[Any] = match[0]
UpperCAmelCase : List[str] = want_ver
if op not in ops:
raise ValueError(F'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
UpperCAmelCase : Any = """.""".join([str(__a ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(__a , __a , __a , __a , __a , __a )
return
# check if any version is installed
try:
UpperCAmelCase : Union[str, Any] = importlib.metadata.version(__a )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(__a , __a , __a , __a , __a , __a )
def __lowerCamelCase ( _lowercase ) -> Any:
UpperCAmelCase : Optional[int] = """Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"""
return require_version(__a , __a )
| 720 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
a : int = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
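# Policy implemented below: an issue at least 30 days old with no exempt label is
# closed once the bot's own stale comment has sat unanswered for more than 7 days;
# otherwise, after 23 days without activity, the bot posts a stale warning.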
def __lowerCamelCase ( ) -> Dict:
UpperCAmelCase : str = Github(os.environ["""GITHUB_TOKEN"""] )
UpperCAmelCase : Dict = g.get_repo("""huggingface/transformers""" )
UpperCAmelCase : int = repo.get_issues(state="""open""" )
for issue in open_issues:
UpperCAmelCase : Optional[int] = sorted([comment for comment in issue.get_comments()] , key=lambda _lowercase : i.created_at , reverse=_lowercase )
UpperCAmelCase : Any = comments[0] if len(_lowercase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 672 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self , A , A ) -> str:
UpperCAmelCase : Tuple = jnp.ones((batch_size, length) ) / length
return scores
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Dict = None
UpperCAmelCase : int = 20
UpperCAmelCase : Tuple = self._get_uniform_logits(batch_size=2 , length=snake_case_ )
# tweak scores to not be uniform anymore
UpperCAmelCase : Optional[Any] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
UpperCAmelCase : Tuple = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
UpperCAmelCase : Union[str, Any] = jax.nn.softmax(snake_case_ , axis=-1 )
UpperCAmelCase : Union[str, Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=1.3 )
UpperCAmelCase : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(snake_case_ , scores.copy() , cur_len=snake_case_ ) , axis=-1 )
UpperCAmelCase : Dict = jax.nn.softmax(temp_dist_warper_smoother(snake_case_ , scores.copy() , cur_len=snake_case_ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : List[Any] = None
UpperCAmelCase : Tuple = 10
UpperCAmelCase : str = 2
# create ramp distribution
UpperCAmelCase : Any = np.broadcast_to(np.arange(snake_case_ )[None, :] , (batch_size, vocab_size) ).copy()
UpperCAmelCase : Tuple = ramp_logits[1:, : vocab_size // 2] + vocab_size
UpperCAmelCase : Union[str, Any] = FlaxTopKLogitsWarper(3 )
UpperCAmelCase : Dict = top_k_warp(snake_case_ , snake_case_ , cur_len=snake_case_ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
UpperCAmelCase : Any = 5
UpperCAmelCase : Optional[int] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
UpperCAmelCase : List[str] = np.broadcast_to(np.arange(snake_case_ )[None, :] , (batch_size, length) ).copy()
UpperCAmelCase : str = top_k_warp_safety_check(snake_case_ , snake_case_ , cur_len=snake_case_ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _lowercase( self ) -> Dict:
UpperCAmelCase : Any = None
UpperCAmelCase : Any = 10
UpperCAmelCase : str = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
UpperCAmelCase : List[str] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
UpperCAmelCase : Tuple = FlaxTopPLogitsWarper(0.8 )
UpperCAmelCase : Tuple = np.exp(top_p_warp(snake_case_ , snake_case_ , cur_len=snake_case_ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
UpperCAmelCase : Any = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
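        # hand check of the expectation above: row 0 sorts to [0.5, 0.3, 0.1, 0.1] with
        # cumulative sums [0.5, 0.8, ...], so top_p=0.8 keeps exactly {0.5, 0.3}; row 1
        # sorts to [0.3, 0.3, 0.25, 0.15] with cumulative sums [0.3, 0.6, 0.85, ...],
        # so the first three probabilities survive and 0.15 is zeroed out.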
# check edge cases with negative and extreme logits
UpperCAmelCase : Optional[int] = np.broadcast_to(np.arange(snake_case_ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
UpperCAmelCase : Dict = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
UpperCAmelCase : Any = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
UpperCAmelCase : int = top_p_warp(snake_case_ , snake_case_ , cur_len=snake_case_ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Any = 20
UpperCAmelCase : Dict = 4
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=snake_case_ )
# check that min length is applied at length 5
UpperCAmelCase : Optional[int] = ids_tensor((batch_size, 20) , vocab_size=20 )
UpperCAmelCase : int = 5
UpperCAmelCase : int = self._get_uniform_logits(snake_case_ , snake_case_ )
UpperCAmelCase : Any = min_dist_processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
UpperCAmelCase : Optional[int] = self._get_uniform_logits(snake_case_ , snake_case_ )
UpperCAmelCase : Union[str, Any] = 15
UpperCAmelCase : Optional[Any] = min_dist_processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
self.assertFalse(jnp.isinf(snake_case_ ).any() )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : int = 20
UpperCAmelCase : Dict = 4
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : str = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case_ )
# check that all scores are -inf except the bos_token_id score
UpperCAmelCase : List[str] = ids_tensor((batch_size, 1) , vocab_size=20 )
UpperCAmelCase : Union[str, Any] = 1
UpperCAmelCase : Any = self._get_uniform_logits(snake_case_ , snake_case_ )
UpperCAmelCase : List[Any] = logits_processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
UpperCAmelCase : Optional[Any] = 3
UpperCAmelCase : Dict = self._get_uniform_logits(snake_case_ , snake_case_ )
UpperCAmelCase : List[str] = logits_processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
self.assertFalse(jnp.isinf(snake_case_ ).any() )
def _lowercase( self ) -> Dict:
UpperCAmelCase : List[Any] = 20
UpperCAmelCase : List[str] = 4
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : Dict = 5
UpperCAmelCase : Optional[int] = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case_ , eos_token_id=snake_case_ )
# check that all scores are -inf except the eos_token_id when max_length is reached
UpperCAmelCase : List[str] = ids_tensor((batch_size, 4) , vocab_size=20 )
UpperCAmelCase : Union[str, Any] = 4
UpperCAmelCase : Optional[Any] = self._get_uniform_logits(snake_case_ , snake_case_ )
UpperCAmelCase : Optional[Any] = logits_processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
UpperCAmelCase : Union[str, Any] = 3
UpperCAmelCase : List[Any] = self._get_uniform_logits(snake_case_ , snake_case_ )
UpperCAmelCase : int = logits_processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
self.assertFalse(jnp.isinf(snake_case_ ).any() )
def _lowercase( self ) -> Dict:
UpperCAmelCase : Any = 4
UpperCAmelCase : Any = 10
UpperCAmelCase : List[Any] = 15
UpperCAmelCase : List[Any] = 2
UpperCAmelCase : Dict = 1
UpperCAmelCase : Optional[Any] = 15
# dummy input_ids and scores
UpperCAmelCase : Any = ids_tensor((batch_size, sequence_length) , snake_case_ )
UpperCAmelCase : int = input_ids.copy()
UpperCAmelCase : Optional[int] = self._get_uniform_logits(snake_case_ , snake_case_ )
UpperCAmelCase : Tuple = scores.copy()
# instantiate all dist processors
UpperCAmelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase : Dict = FlaxTopKLogitsWarper(3 )
UpperCAmelCase : Dict = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=snake_case_ )
UpperCAmelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case_ )
UpperCAmelCase : int = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case_ , eos_token_id=snake_case_ )
UpperCAmelCase : Any = 10
# no processor list
UpperCAmelCase : List[str] = temp_dist_warp(snake_case_ , snake_case_ , cur_len=snake_case_ )
UpperCAmelCase : Tuple = top_k_warp(snake_case_ , snake_case_ , cur_len=snake_case_ )
UpperCAmelCase : int = top_p_warp(snake_case_ , snake_case_ , cur_len=snake_case_ )
UpperCAmelCase : Optional[int] = min_dist_proc(snake_case_ , snake_case_ , cur_len=snake_case_ )
UpperCAmelCase : Union[str, Any] = bos_dist_proc(snake_case_ , snake_case_ , cur_len=snake_case_ )
UpperCAmelCase : List[str] = eos_dist_proc(snake_case_ , snake_case_ , cur_len=snake_case_ )
# with processor list
UpperCAmelCase : int = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase : Any = processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
# scores should be equal
self.assertTrue(jnp.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : List[Any] = 4
UpperCAmelCase : Union[str, Any] = 10
UpperCAmelCase : Optional[int] = 15
UpperCAmelCase : List[Any] = 2
UpperCAmelCase : List[Any] = 1
UpperCAmelCase : Optional[Any] = 15
# dummy input_ids and scores
UpperCAmelCase : Any = ids_tensor((batch_size, sequence_length) , snake_case_ )
UpperCAmelCase : Dict = input_ids.copy()
UpperCAmelCase : Tuple = self._get_uniform_logits(snake_case_ , snake_case_ )
UpperCAmelCase : int = scores.copy()
# instantiate all dist processors
UpperCAmelCase : str = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase : Optional[Any] = FlaxTopKLogitsWarper(3 )
UpperCAmelCase : Any = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase : str = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=snake_case_ )
UpperCAmelCase : str = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case_ )
UpperCAmelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case_ , eos_token_id=snake_case_ )
UpperCAmelCase : Dict = 10
# no processor list
def run_no_processor_list(A , A , A ):
UpperCAmelCase : Optional[int] = temp_dist_warp(snake_case_ , snake_case_ , cur_len=snake_case_ )
UpperCAmelCase : str = top_k_warp(snake_case_ , snake_case_ , cur_len=snake_case_ )
UpperCAmelCase : int = top_p_warp(snake_case_ , snake_case_ , cur_len=snake_case_ )
UpperCAmelCase : Tuple = min_dist_proc(snake_case_ , snake_case_ , cur_len=snake_case_ )
UpperCAmelCase : Optional[Any] = bos_dist_proc(snake_case_ , snake_case_ , cur_len=snake_case_ )
UpperCAmelCase : Optional[Any] = eos_dist_proc(snake_case_ , snake_case_ , cur_len=snake_case_ )
return scores
# with processor list
def run_processor_list(A , A , A ):
UpperCAmelCase : int = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase : Optional[int] = processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
return scores
UpperCAmelCase : Tuple = jax.jit(snake_case_ )
UpperCAmelCase : List[str] = jax.jit(snake_case_ )
UpperCAmelCase : int = jitted_run_no_processor_list(snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase : int = jitted_run_processor_list(snake_case_ , snake_case_ , snake_case_ )
# scores should be equal
self.assertTrue(jnp.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
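# Note: FlaxLogitsProcessorList simply applies each processor in sequence, which is
# why the fused call must match the manual chain above. A minimal sketch of that
# contract, as comments (not the library source):
#     for processor in processor_list:
#         scores = processor(input_ids, scores, cur_len=cur_len)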
| 721 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Any:
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : str = use_input_mask
UpperCAmelCase : Optional[int] = use_token_type_ids
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : str = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : str = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Optional[Any] = type_sequence_label_size
UpperCAmelCase : str = initializer_range
UpperCAmelCase : List[Any] = num_labels
UpperCAmelCase : Dict = num_choices
UpperCAmelCase : Tuple = scope
def _lowercase( self ) -> Dict:
UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase( self ) -> Dict:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def _lowercase( self , A , A , A , A , A , A , A ) -> str:
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A )
UpperCAmelCase : Optional[int] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> List[Any]:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
UpperCAmelCase : str = model(
A , attention_mask=A , encoder_hidden_states=A , )
UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Dict = True
UpperCAmelCase : Tuple = True
UpperCAmelCase : str = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
UpperCAmelCase : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
UpperCAmelCase : str = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append next tokens to input_ids and next_mask to the attention mask
UpperCAmelCase : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )["""hidden_states"""][0]
UpperCAmelCase : Optional[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
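    # (The slice comparison above verifies that decoding with cached past_key_values
    # reproduces the uncached forward pass on the newly appended tokens.)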
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = config_and_inputs
UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
lowercase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = OpenLlamaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : int = type
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[str] = 3
UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""]
UpperCAmelCase : str = input_ids.ne(1 ).to(A )
UpperCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = 3
UpperCAmelCase : Any = """single_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : Optional[Any] = input_ids.ne(1 ).to(A )
UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> int:
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = 3
UpperCAmelCase : Optional[Any] = """multi_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : int = input_ids.ne(1 ).to(A )
UpperCAmelCase : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase : Any = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _lowercase( self ) -> Dict:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowercase( self , A ) -> str:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Any = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
UpperCAmelCase : List[str] = original_model(A ).last_hidden_state
UpperCAmelCase : List[Any] = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Union[str, Any] = {"""type""": scaling_type, """factor""": 1_0.0}
UpperCAmelCase : str = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
UpperCAmelCase : List[str] = scaled_model(A ).last_hidden_state
UpperCAmelCase : Optional[int] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
| 672 | 0 |
'''simple docstring'''
import numpy as np
def __lowerCamelCase ( _lowercase ) -> np.ndarray:
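    """
    Element-wise sigmoid, 1 / (1 + exp(-x)); e.g. sigmoid(np.array([0.0])) is
    array([0.5]), and sigmoid(np.array([1.0])) is roughly array([0.7311]).
    """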
return 1 / (1 + np.exp(-vector ))
def __lowerCamelCase ( _lowercase ) -> np.ndarray:
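    """
    Swish / SiLU activation, x * sigmoid(x); e.g. at x = 0 it returns 0, and at
    x = 1 it returns sigmoid(1), roughly 0.7311.
    """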
return vector * sigmoid(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
'''simple docstring'''
import math
def __lowerCamelCase ( _lowercase ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
UpperCAmelCase : str = range(3 , int(math.sqrt(_lowercase ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def __lowerCamelCase ( _lowercase , _lowercase=1 , **_lowercase ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = factor * value
UpperCAmelCase : List[Any] = value
while not is_prime(_lowercase ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **_lowercase )
    return value
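# Minimal usage sketch, as comments (the helpers above correspond to ``is_prime``
# and ``next_prime`` in the original module):
#   is_prime(13)   -> True
#   next_prime(14) -> 17   (14, 15 and 16 are composite)
#   next_prime(13) -> 17   (13 is already prime, so the search restarts at 14)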
| 672 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCamelCase_ ( __UpperCAmelCase ):
def _lowercase( self ) -> str:
UpperCAmelCase : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCamelCase , """width_multiplier""" ) )
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=64 , A=2 , A=3 , A="swish" , A=3 , A=32 , A=0.1 , A=0.0_2 , A=True , A=True , A=10 , A=None , A=0.2_5 , A=0.0 , A=0.0 , ) -> str:
UpperCAmelCase : Tuple = parent
UpperCAmelCase : Optional[Any] = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : Any = patch_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 )
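        # make_divisible rounds the channel count to a multiple of ``divisor``; with
        # the default width_multiplier of 0.25 this is 512 * 0.25 = 128, which is
        # already a multiple of 8.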
UpperCAmelCase : Tuple = hidden_act
UpperCAmelCase : Dict = conv_kernel_size
UpperCAmelCase : List[str] = output_stride
UpperCAmelCase : List[Any] = classifier_dropout_prob
UpperCAmelCase : Any = use_labels
UpperCAmelCase : Optional[Any] = is_training
UpperCAmelCase : Dict = num_labels
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Optional[Any] = scope
UpperCAmelCase : Optional[Any] = width_multiplier
UpperCAmelCase : Union[str, Any] = ffn_dropout
UpperCAmelCase : int = attn_dropout
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : Tuple = None
if self.use_labels:
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowercase( self ) -> List[Any]:
return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
def _lowercase( self , A , A , A , A ) -> Union[str, Any]:
UpperCAmelCase : int = MobileViTVaModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase : Tuple = model(_lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowercase( self , A , A , A , A ) -> Optional[Any]:
UpperCAmelCase : Any = self.num_labels
UpperCAmelCase : List[Any] = MobileViTVaForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase : Optional[int] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A , A ) -> Dict:
UpperCAmelCase : Optional[int] = self.num_labels
UpperCAmelCase : Dict = MobileViTVaForSemanticSegmentation(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase : List[str] = model(_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCAmelCase : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowercase( self ) -> str:
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowercase = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> Any:
UpperCAmelCase : Tuple = MobileViTVaModelTester(self )
UpperCAmelCase : Any = MobileViTVaConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def _lowercase( self ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def _lowercase( self ) -> List[Any]:
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def _lowercase( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def _lowercase( self ) -> Optional[int]:
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def _lowercase( self ) -> List[str]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowercase( self ) -> Optional[Any]:
pass
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Any = model_class(_lowerCamelCase )
UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Dict = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _lowercase( self ) -> List[Any]:
def check_hidden_states_output(A , A , A ):
UpperCAmelCase : Optional[int] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase : Tuple = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
UpperCAmelCase : Optional[Any] = outputs.hidden_states
UpperCAmelCase : int = 5
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCAmelCase : Tuple = 2
for i in range(len(_lowerCamelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : str = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Union[str, Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase )
@slow
def _lowercase( self ) -> str:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : List[Any] = MobileViTVaModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def __lowerCamelCase ( ) -> Optional[Any]:
UpperCAmelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def _lowercase( self ) -> int:
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
def _lowercase( self ) -> Dict:
UpperCAmelCase : Any = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
_lowerCamelCase )
UpperCAmelCase : Optional[int] = self.default_image_processor
UpperCAmelCase : Dict = prepare_img()
UpperCAmelCase : Dict = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**_lowerCamelCase )
# verify the logits
UpperCAmelCase : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
UpperCAmelCase : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
UpperCAmelCase : Tuple = model.to(_lowerCamelCase )
UpperCAmelCase : List[Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
UpperCAmelCase : str = prepare_img()
UpperCAmelCase : str = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase : int = model(**_lowerCamelCase )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify the logits
UpperCAmelCase : Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _lowerCamelCase )
UpperCAmelCase : int = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=_lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
def _lowercase( self ) -> Dict:
UpperCAmelCase : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
UpperCAmelCase : Optional[Any] = model.to(_lowerCamelCase )
UpperCAmelCase : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase : Tuple = model(**_lowerCamelCase )
UpperCAmelCase : Tuple = outputs.logits.detach().cpu()
UpperCAmelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase , target_sizes=[(50, 60)] )
UpperCAmelCase : int = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _lowerCamelCase )
UpperCAmelCase : str = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase )
UpperCAmelCase : List[str] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _lowerCamelCase )
| 701 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
UpperCAmelCase : Union[str, Any] = set()
# Replace all the whitespace in our sentence
UpperCAmelCase : List[str] = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
    return len(frequency ) == 2_6
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
UpperCAmelCase : Tuple = [False] * 2_6
for char in input_str:
        if char.islower():
            flag[ord(char ) - 97] = True
        elif char.isupper():
            flag[ord(char ) - 65] = True
return all(_lowercase )
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def __lowerCamelCase ( ) -> None:
from timeit import timeit
UpperCAmelCase : str = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
print(timeit("""is_pangram()""" , setup=_lowercase ) )
print(timeit("""is_pangram_faster()""" , setup=_lowercase ) )
print(timeit("""is_pangram_fastest()""" , setup=_lowercase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 672 | 0 |
'''simple docstring'''
from torch import nn
class UpperCamelCase_ ( nn.Module ):
def __init__( self , A , A ) -> Dict:
super().__init__()
UpperCAmelCase : Any = class_size
UpperCAmelCase : Optional[int] = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
UpperCAmelCase : List[str] = nn.Linear(__lowerCamelCase , __lowerCamelCase )
def _lowercase( self , A ) -> Any:
UpperCAmelCase : Any = self.mlp(__lowerCamelCase )
return logits
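# Minimal usage sketch, as comments (hypothetical; assumes the module above is
# exported as ``ClassificationHead``):
#   head = ClassificationHead(class_size=5, embed_size=768)
#   logits = head(torch.randn(2, 768))   # -> tensor of shape (2, 5)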
| 702 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
a : Union[str, Any] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
a : int = """\
Mean Squared Error (MSE) is the average of the squared differences between the predicted
and actual values.
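Formally, MSE = (1 / n) * sum_{i=1..n} (y_i - y_hat_i)^2 over n samples.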
"""
a : int = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def _lowercase( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def _lowercase( self ) -> List[Any]:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def _lowercase( self , A , A , A=None , A="uniform_average" , A=True ) -> List[Any]:
UpperCAmelCase : List[Any] = mean_squared_error(
A , A , sample_weight=A , multioutput=A , squared=A )
return {"mse": mse}
| 672 | 0 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase_ :
@staticmethod
def _lowercase( *A , **A ) -> Optional[int]:
pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
lowercase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def _lowercase( self , A , A , A ) -> Union[str, Any]:
UpperCAmelCase : int = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
UpperCAmelCase : Tuple = [
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
]
return object_detector, examples
def _lowercase( self , A , A ) -> int:
UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 )
UpperCAmelCase : Optional[Any] = len(_lowercase )
self.assertGreater(_lowercase , 0 )
self.assertEqual(
_lowercase , [
{
"""score""": ANY(_lowercase ),
"""label""": ANY(_lowercase ),
"""box""": {"""xmin""": ANY(_lowercase ), """ymin""": ANY(_lowercase ), """xmax""": ANY(_lowercase ), """ymax""": ANY(_lowercase )},
}
for i in range(_lowercase )
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def _lowercase( self ) -> Optional[Any]:
pass
@require_torch
def _lowercase( self ) -> Any:
UpperCAmelCase : Optional[int] = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
UpperCAmelCase : Any = object_detector(
"""./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{"""score""": 0.7_2_3_5, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_2_1_8, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_1_8_4, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6_7_4_8, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_6_5_6, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_6_1_4, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_4_5_6, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.6_4_2, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6_4_1_9, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
UpperCAmelCase : int = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{"""score""": 0.7_2_3_5, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_2_1_8, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_1_8_4, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6_7_4_8, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_6_5_6, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_6_1_4, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_4_5_6, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.6_4_2, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6_4_1_9, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Any = pipeline("""zero-shot-object-detection""" )
UpperCAmelCase : List[str] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1_4_7_4, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1_2_0_8, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
UpperCAmelCase : List[Any] = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1_4_7_4, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1_2_0_8, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1_4_7_4, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1_2_0_8, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def _lowercase( self ) -> Union[str, Any]:
pass
@require_torch
@slow
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[int] = 0.2
UpperCAmelCase : Dict = pipeline("""zero-shot-object-detection""" )
UpperCAmelCase : Tuple = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=_lowercase , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Optional[int] = 2
UpperCAmelCase : List[Any] = pipeline("""zero-shot-object-detection""" )
UpperCAmelCase : Any = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=_lowercase , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
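# Note: ``threshold`` filters detections by minimum score, while ``top_k`` keeps only
# the k highest-scoring boxes, as exercised by the last two tests above.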
| 703 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Any = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'fnet'
def __init__( self , A=32000 , A=768 , A=12 , A=3072 , A="gelu_new" , A=0.1 , A=512 , A=4 , A=0.0_2 , A=1e-12 , A=False , A=512 , A=3 , A=1 , A=2 , **A , ) -> int:
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Dict = max_position_embeddings
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : int = num_hidden_layers
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : List[Any] = type_vocab_size
UpperCAmelCase : int = layer_norm_eps
UpperCAmelCase : Optional[Any] = use_tpu_fourier_optimizations
UpperCAmelCase : List[Any] = tpu_short_seq_length
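# Minimal usage sketch, as comments (assumes the class above is exported as
# ``FNetConfig``):
#   config = FNetConfig()   # defaults mirror google/fnet-base
#   config = FNetConfig(use_tpu_fourier_optimizations=True, tpu_short_seq_length=512)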
| 672 | 0 |
from math import pi, sqrt
def __lowerCamelCase ( _lowercase ) -> Dict:
if num <= 0:
raise ValueError("""math domain error""" )
if num > 171.5:
raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
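# Worked example of the half-integer recursion above:
#   gamma(2.5) = 1.5 * gamma(1.5) = 1.5 * 0.5 * gamma(0.5) = 0.75 * sqrt(pi) ~ 1.3293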
def __lowerCamelCase ( ) -> Union[str, Any]:
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
a : int = 1.0
while num:
a : Dict = float(input("""Gamma of: """))
print(F'''gamma({num}) = {gamma(num)}''')
print("""\nEnter 0 to exit...""")
| 704 |
'''simple docstring'''
a : List[Any] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : Dict = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
UpperCAmelCase : Stack[int] = Stack()
UpperCAmelCase : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_lowercase ) )
elif i in operators:
# RULE 2
operator_stack.push(_lowercase )
elif i == ")":
# RULE 4
            UpperCAmelCase : List[Any] = operator_stack.peek()  # opr
            operator_stack.pop()
            UpperCAmelCase : str = operand_stack.peek()  # num1 (most recently pushed operand)
            operand_stack.pop()
            UpperCAmelCase : str = operand_stack.peek()  # num2
            operand_stack.pop()
            UpperCAmelCase : List[Any] = operators[opr](num2 , num1 )  # total
            operand_stack.push(total )
# RULE 5
return operand_stack.peek()
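# Worked trace for "(5 + ((4 * 2) * (2 + 3)))":
#   "(4 * 2)" collapses to 8 at its closing parenthesis, then "(2 + 3)" to 5,
#   then "(8 * 5)" to 40, and finally "(5 + 40)" to 45.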
if __name__ == "__main__":
a : Tuple = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 672 | 0 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCamelCase_ ( _UpperCAmelCase ):
@slow
@require_torch
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Tuple = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
UpperCAmelCase : str = BertTokenizer.from_pretrained("""bert-base-uncased""" )
UpperCAmelCase : Tuple = bertabert.config.encoder.vocab_size
UpperCAmelCase : int = tokenizer.sep_token_id
UpperCAmelCase : str = tokenizer.cls_token_id
UpperCAmelCase : List[Any] = 128
UpperCAmelCase : Tuple = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
UpperCAmelCase : List[Any] = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
UpperCAmelCase : List[str] = train_dataset.select(range(32 ) )
UpperCAmelCase : Optional[int] = val_dataset.select(range(16 ) )
UpperCAmelCase : List[Any] = 4
def _map_to_encoder_decoder_inputs(A ):
# Tokenizer will automatically set [BOS] <text> [EOS]
UpperCAmelCase : Tuple = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=A_ , max_length=512 )
UpperCAmelCase : str = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=A_ , max_length=128 )
UpperCAmelCase : Any = inputs.input_ids
UpperCAmelCase : List[str] = inputs.attention_mask
UpperCAmelCase : Dict = outputs.input_ids
UpperCAmelCase : Union[str, Any] = outputs.input_ids.copy()
UpperCAmelCase : int = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
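            # (-100 is the target index ignored by PyTorch's cross-entropy loss, so
            # padded label positions do not contribute to the loss.)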
UpperCAmelCase : Dict = outputs.attention_mask
assert all(len(A_ ) == 512 for x in inputs.input_ids )
assert all(len(A_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(A ):
UpperCAmelCase : Optional[int] = pred.label_ids
UpperCAmelCase : Tuple = pred.predictions
# all unnecessary tokens are removed
UpperCAmelCase : List[Any] = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
UpperCAmelCase : Tuple = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
UpperCAmelCase : Optional[Any] = sum([int(pred_str[i] == label_str[i] ) for i in range(len(A_ ) )] ) / len(A_ )
return {"accuracy": accuracy}
# map train dataset
UpperCAmelCase : Optional[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=A_ , batch_size=A_ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
UpperCAmelCase : Optional[Any] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=A_ , batch_size=A_ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
UpperCAmelCase : str = self.get_auto_remove_tmp_dir()
UpperCAmelCase : List[Any] = SeqaSeqTrainingArguments(
output_dir=A_ , per_device_train_batch_size=A_ , per_device_eval_batch_size=A_ , predict_with_generate=A_ , evaluation_strategy="""steps""" , do_train=A_ , do_eval=A_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
UpperCAmelCase : List[str] = SeqaSeqTrainer(
model=A_ , args=A_ , compute_metrics=_compute_metrics , train_dataset=A_ , eval_dataset=A_ , tokenizer=A_ , )
# start training
trainer.train()
| 705 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a : List[str] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
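# Note: ca_qpos_proj is deliberately left out of the per-layer loop above (see
# the commented-out entries) and mapped only for decoder layer 0, since
# conditional DETR applies the cross-attention query position projection in
# the first decoder layer only.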
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
UpperCAmelCase : List[str] = state_dict.pop(_lowercase )
UpperCAmelCase : List[str] = val
def __lowerCamelCase ( _lowercase ) -> Any:
UpperCAmelCase : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCAmelCase : List[str] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
UpperCAmelCase : Dict = value
else:
UpperCAmelCase : List[Any] = value
return new_state_dict
def __lowerCamelCase ( _lowercase , _lowercase=False ) -> Optional[int]:
UpperCAmelCase : Dict = """"""
if is_panoptic:
UpperCAmelCase : Tuple = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCAmelCase : List[Any] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCAmelCase : List[Any] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : Dict = in_proj_weight[:2_5_6, :]
UpperCAmelCase : Optional[Any] = in_proj_bias[:2_5_6]
UpperCAmelCase : List[Any] = in_proj_weight[2_5_6:5_1_2, :]
UpperCAmelCase : Tuple = in_proj_bias[2_5_6:5_1_2]
UpperCAmelCase : List[str] = in_proj_weight[-2_5_6:, :]
UpperCAmelCase : List[str] = in_proj_bias[-2_5_6:]
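# Note on the slicing above: the model's hidden size is 256, so the fused
# in_proj matrix stacks the query rows at 0:256, the key rows at 256:512 and
# the value rows in the last 256 rows, matching PyTorch's MultiheadAttention
# packing order (q, k, v).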
def __lowerCamelCase ( ) -> Dict:
UpperCAmelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase : Tuple = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
UpperCAmelCase : str = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
UpperCAmelCase : List[Any] = """resnet101"""
if "dc5" in model_name:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : List[Any] = """panoptic""" in model_name
if is_panoptic:
UpperCAmelCase : Union[str, Any] = 2_5_0
else:
UpperCAmelCase : int = 9_1
UpperCAmelCase : Tuple = """huggingface/label-files"""
UpperCAmelCase : List[Any] = """coco-detection-id2label.json"""
UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase : Dict = {int(_lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase : Optional[Any] = idalabel
UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
# load image processor
UpperCAmelCase : List[str] = """coco_panoptic""" if is_panoptic else """coco_detection"""
UpperCAmelCase : List[Any] = ConditionalDetrImageProcessor(format=_lowercase )
# prepare image
UpperCAmelCase : Union[str, Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=_lowercase , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = encoding["""pixel_values"""]
logger.info(F'''Converting model {model_name}...''' )
# load original model from torch hub
UpperCAmelCase : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , _lowercase , pretrained=_lowercase ).eval()
UpperCAmelCase : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
UpperCAmelCase : List[Any] = """conditional_detr.""" + src
rename_key(_lowercase , _lowercase , _lowercase )
UpperCAmelCase : List[Any] = rename_backbone_keys(_lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowercase , is_panoptic=_lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase : int = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
UpperCAmelCase : Union[str, Any] = state_dict.pop(_lowercase )
UpperCAmelCase : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCAmelCase : Any = state_dict.pop(_lowercase )
UpperCAmelCase : Optional[Any] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
UpperCAmelCase : List[Any] = state_dict.pop(_lowercase )
UpperCAmelCase : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
UpperCAmelCase : Optional[int] = state_dict.pop(_lowercase )
UpperCAmelCase : Union[str, Any] = val
# finally, create HuggingFace model and load state dict
UpperCAmelCase : List[Any] = ConditionalDetrForSegmentation(_lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
model.push_to_hub(repo_id=_lowercase , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
UpperCAmelCase : Union[str, Any] = conditional_detr(_lowercase )
UpperCAmelCase : int = model(_lowercase )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 )
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
a : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 672 | 0 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
a : List[str] = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
a : List[Any] = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
UpperCAmelCase : str = numpy.dtype(numpy.uintaa ).newbyteorder(""">""" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_lowerCamelCase )[0]
@deprecated(_lowerCamelCase , """Please use tf.data to implement this functionality.""" )
def __lowerCamelCase ( _lowercase ) -> Any:
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=_lowerCamelCase ) as bytestream:
UpperCAmelCase : str = _readaa(_lowerCamelCase )
if magic != 2_0_5_1:
raise ValueError(
"""Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) )
UpperCAmelCase : List[str] = _readaa(_lowerCamelCase )
UpperCAmelCase : Dict = _readaa(_lowerCamelCase )
UpperCAmelCase : Optional[int] = _readaa(_lowerCamelCase )
UpperCAmelCase : Dict = bytestream.read(rows * cols * num_images )
UpperCAmelCase : Optional[int] = numpy.frombuffer(_lowerCamelCase , dtype=numpy.uinta )
UpperCAmelCase : Dict = data.reshape(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , 1 )
return data
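# 2051 (checked above) and 2049 (checked in the label extractor below) are the
# IDX-format magic numbers for MNIST image and label files, stored big-endian
# per the original dataset specification.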
@deprecated(_lowerCamelCase , """Please use tf.one_hot on tensors.""" )
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
UpperCAmelCase : str = labels_dense.shape[0]
UpperCAmelCase : str = numpy.arange(_lowerCamelCase ) * num_classes
UpperCAmelCase : str = numpy.zeros((num_labels, num_classes) )
UpperCAmelCase : Tuple = 1
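# NB: in the reference implementation the hot entries are written with
# labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1, using the
# flat offsets built by the arange(...) * num_classes line above.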
return labels_one_hot
@deprecated(_lowerCamelCase , """Please use tf.data to implement this functionality.""" )
def __lowerCamelCase ( _lowercase , _lowercase=False , _lowercase=1_0 ) -> Dict:
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=_lowerCamelCase ) as bytestream:
UpperCAmelCase : int = _readaa(_lowerCamelCase )
if magic != 2_0_4_9:
raise ValueError(
"""Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) )
UpperCAmelCase : Any = _readaa(_lowerCamelCase )
UpperCAmelCase : List[Any] = bytestream.read(_lowerCamelCase )
UpperCAmelCase : Dict = numpy.frombuffer(_lowerCamelCase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_lowerCamelCase , _lowerCamelCase )
return labels
class UpperCamelCase_ :
@deprecated(
A , """Please use alternatives such as official/mnist/_DataSet.py"""
""" from tensorflow/models.""" , )
def __init__( self , A , A , A=False , A=False , A=dtypes.floataa , A=True , A=None , ) -> List[str]:
UpperCAmelCase : Optional[int] = random_seed.get_seed(A )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
UpperCAmelCase : Optional[Any] = dtypes.as_dtype(A ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("""Invalid image dtype %r, expected uint8 or float32""" % dtype )
if fake_data:
UpperCAmelCase : str = 10000
UpperCAmelCase : Optional[Any] = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
UpperCAmelCase : Any = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
UpperCAmelCase : Union[str, Any] = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
UpperCAmelCase : List[Any] = images.astype(numpy.floataa )
UpperCAmelCase : Optional[Any] = numpy.multiply(A , 1.0 / 2_5_5.0 )
UpperCAmelCase : Optional[Any] = images
UpperCAmelCase : List[Any] = labels
UpperCAmelCase : str = 0
UpperCAmelCase : Union[str, Any] = 0
@property
def _lowercase( self ) -> Any:
return self._images
@property
def _lowercase( self ) -> Dict:
return self._labels
@property
def _lowercase( self ) -> str:
return self._num_examples
@property
def _lowercase( self ) -> Dict:
return self._epochs_completed
def _lowercase( self , A , A=False , A=True ) -> Optional[int]:
if fake_data:
UpperCAmelCase : Any = [1] * 784
UpperCAmelCase : Union[str, Any] = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(A )],
[fake_label for _ in range(A )],
)
UpperCAmelCase : str = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
UpperCAmelCase : Any = numpy.arange(self._num_examples )
numpy.random.shuffle(A )
UpperCAmelCase : int = self.images[perma]
UpperCAmelCase : str = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the remaining examples in this epoch
UpperCAmelCase : Optional[int] = self._num_examples - start
UpperCAmelCase : Optional[int] = self._images[start : self._num_examples]
UpperCAmelCase : int = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
UpperCAmelCase : Optional[int] = numpy.arange(self._num_examples )
numpy.random.shuffle(A )
UpperCAmelCase : Optional[Any] = self.images[perm]
UpperCAmelCase : Tuple = self.labels[perm]
# Start next epoch
UpperCAmelCase : Tuple = 0
UpperCAmelCase : Union[str, Any] = batch_size - rest_num_examples
UpperCAmelCase : List[str] = self._index_in_epoch
UpperCAmelCase : Dict = self._images[start:end]
UpperCAmelCase : str = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
UpperCAmelCase : Union[str, Any] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(_lowerCamelCase , """Please write your own downloading logic.""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Tuple:
if not gfile.Exists(_lowerCamelCase ):
gfile.MakeDirs(_lowerCamelCase )
UpperCAmelCase : Optional[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
if not gfile.Exists(_lowerCamelCase ):
urllib.request.urlretrieve(_lowerCamelCase , _lowerCamelCase ) # noqa: S310
with gfile.GFile(_lowerCamelCase ) as f:
UpperCAmelCase : Any = f.size()
print("""Successfully downloaded""" , _lowerCamelCase , _lowerCamelCase , """bytes.""" )
return filepath
@deprecated(
_lowerCamelCase , """Please use alternatives such as:""" """ tensorflow_datasets.load('mnist')""" )
def __lowerCamelCase ( _lowercase , _lowercase=False , _lowercase=False , _lowercase=dtypes.floataa , _lowercase=True , _lowercase=5_0_0_0 , _lowercase=None , _lowercase=DEFAULT_SOURCE_URL , ) -> List[Any]:
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=_lowerCamelCase , one_hot=_lowerCamelCase , dtype=_lowerCamelCase , seed=_lowerCamelCase )
UpperCAmelCase : Optional[int] = fake()
UpperCAmelCase : Tuple = fake()
UpperCAmelCase : List[str] = fake()
return _Datasets(train=_lowerCamelCase , validation=_lowerCamelCase , test=_lowerCamelCase )
if not source_url: # empty string check
UpperCAmelCase : str = DEFAULT_SOURCE_URL
UpperCAmelCase : Optional[int] = "train-images-idx3-ubyte.gz"
UpperCAmelCase : Dict = "train-labels-idx1-ubyte.gz"
UpperCAmelCase : List[str] = "t10k-images-idx3-ubyte.gz"
UpperCAmelCase : List[str] = "t10k-labels-idx1-ubyte.gz"
UpperCAmelCase : Optional[int] = _maybe_download(
_lowerCamelCase , _lowerCamelCase , source_url + train_images_file )
with gfile.Open(_lowerCamelCase , """rb""" ) as f:
UpperCAmelCase : int = _extract_images(_lowerCamelCase )
UpperCAmelCase : Optional[Any] = _maybe_download(
_lowerCamelCase , _lowerCamelCase , source_url + train_labels_file )
with gfile.Open(_lowerCamelCase , """rb""" ) as f:
UpperCAmelCase : int = _extract_labels(_lowerCamelCase , one_hot=_lowerCamelCase )
UpperCAmelCase : int = _maybe_download(
_lowerCamelCase , _lowerCamelCase , source_url + test_images_file )
with gfile.Open(_lowerCamelCase , """rb""" ) as f:
UpperCAmelCase : Optional[int] = _extract_images(_lowerCamelCase )
UpperCAmelCase : str = _maybe_download(
_lowerCamelCase , _lowerCamelCase , source_url + test_labels_file )
with gfile.Open(_lowerCamelCase , """rb""" ) as f:
UpperCAmelCase : List[str] = _extract_labels(_lowerCamelCase , one_hot=_lowerCamelCase )
if not 0 <= validation_size <= len(_lowerCamelCase ):
UpperCAmelCase : str = (
"Validation size should be between 0 and "
F'''{len(_lowerCamelCase )}. Received: {validation_size}.'''
)
raise ValueError(_lowerCamelCase )
UpperCAmelCase : Any = train_images[:validation_size]
UpperCAmelCase : Optional[Any] = train_labels[:validation_size]
UpperCAmelCase : Optional[int] = train_images[validation_size:]
UpperCAmelCase : Tuple = train_labels[validation_size:]
UpperCAmelCase : List[str] = {"dtype": dtype, "reshape": reshape, "seed": seed}
UpperCAmelCase : Union[str, Any] = _DataSet(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
UpperCAmelCase : str = _DataSet(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
UpperCAmelCase : Optional[Any] = _DataSet(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
return _Datasets(train=_lowerCamelCase , validation=_lowerCamelCase , test=_lowerCamelCase )
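# Usage sketch (assuming the original public name read_data_sets survives the
# renaming above):
#   mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
#   images, labels = mnist.train.next_batch(100)  # (100, 784) and (100, 10)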
| 706 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 672 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> int:
UpperCAmelCase : int = parent
UpperCAmelCase : Any = batch_size
UpperCAmelCase : Tuple = seq_length
UpperCAmelCase : str = is_training
UpperCAmelCase : List[str] = use_input_mask
UpperCAmelCase : Any = use_token_type_ids
UpperCAmelCase : Optional[int] = use_labels
UpperCAmelCase : Optional[int] = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : int = num_attention_heads
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : int = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Optional[Any] = max_position_embeddings
UpperCAmelCase : Any = type_vocab_size
UpperCAmelCase : Optional[Any] = type_sequence_label_size
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : List[str] = num_labels
UpperCAmelCase : List[Any] = num_choices
UpperCAmelCase : Union[str, Any] = scope
def _lowercase( self ) -> Dict:
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : str = None
if self.use_input_mask:
UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Dict = None
if self.use_token_type_ids:
UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : str = None
UpperCAmelCase : List[str] = None
UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase( self ) -> List[Any]:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , use_stable_embedding=__UpperCamelCase , )
def _lowercase( self , A , A , A , A , A , A , A ) -> List[Any]:
UpperCAmelCase : str = OpenLlamaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase : List[str] = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
UpperCAmelCase : Any = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> str:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : List[Any] = OpenLlamaModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase : Tuple = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , )
UpperCAmelCase : int = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )
UpperCAmelCase : Optional[Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> Tuple:
UpperCAmelCase : Optional[Any] = OpenLlamaForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase : Optional[int] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> Dict:
UpperCAmelCase : List[Any] = True
UpperCAmelCase : int = True
UpperCAmelCase : Union[str, Any] = OpenLlamaForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# first forward pass
UpperCAmelCase : Tuple = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase , )
UpperCAmelCase : Tuple = outputs.past_key_values
# create multiple hypothetical next tokens and extend next_input_ids
UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
UpperCAmelCase : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : int = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["""hidden_states"""][0]
UpperCAmelCase : Optional[int] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def _lowercase( self ) -> Any:
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = config_and_inputs
UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
lowercase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = OpenLlamaModelTester(self )
UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def _lowercase( self ) -> List[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> Dict:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : List[Any] = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _lowercase( self ) -> Dict:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[Any] = 3
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : List[str] = input_ids.ne(1 ).to(__UpperCamelCase )
UpperCAmelCase : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : List[Any] = OpenLlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase : Dict = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> List[str]:
UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : int = 3
UpperCAmelCase : Tuple = """single_label_classification"""
UpperCAmelCase : List[str] = input_dict["""input_ids"""]
UpperCAmelCase : List[Any] = input_ids.ne(1 ).to(__UpperCamelCase )
UpperCAmelCase : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Any = OpenLlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase : Union[str, Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> Any:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : str = 3
UpperCAmelCase : List[str] = """multi_label_classification"""
UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""]
UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCamelCase )
UpperCAmelCase : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase : str = OpenLlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase : Any = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _lowercase( self ) -> Tuple:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowercase( self , A ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : str = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : Dict = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Tuple = OpenLlamaModel(__UpperCamelCase )
original_model.to(__UpperCamelCase )
original_model.eval()
UpperCAmelCase : List[Any] = original_model(__UpperCamelCase ).last_hidden_state
UpperCAmelCase : str = original_model(__UpperCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Union[str, Any] = {"""type""": scaling_type, """factor""": 10.0}
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(__UpperCamelCase )
scaled_model.to(__UpperCamelCase )
scaled_model.eval()
UpperCAmelCase : Dict = scaled_model(__UpperCamelCase ).last_hidden_state
UpperCAmelCase : Optional[int] = scaled_model(__UpperCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
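# Background for the assertions above: "linear" scaling divides the rotary
# position indices by the factor for every input, so even a short sequence
# sees different embeddings, while "dynamic" (NTK-style) scaling only enlarges
# the rotary base once the input exceeds config.max_position_embeddings.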
| 707 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
a : int = None
a : List[Any] = logging.get_logger(__name__)
a : Dict = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
a : Union[str, Any] = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
a : List[Any] = {
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
a : int = """▁"""
class UpperCamelCase_ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
lowercase = BarthezTokenizer
def __init__( self , A=None , A=None , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , **A , ) -> List[Any]:
# Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , **A , )
UpperCAmelCase : Union[str, Any] = vocab_file
UpperCAmelCase : int = False if not self.vocab_file else True
def _lowercase( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Optional[int] = [self.cls_token_id]
UpperCAmelCase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
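# Worked example: a single sequence A becomes <s> A </s>; a pair becomes
# <s> A </s> </s> B </s>, i.e. the RoBERTa-style pair layout built from
# cls = [self.cls_token_id] and sep = [self.sep_token_id] above.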
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : Optional[int] = [self.sep_token_id]
UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase( self , A , A = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : str = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 672 | 0 |
'''simple docstring'''
class UpperCamelCase_ :
def __init__( self ) -> Dict:
UpperCAmelCase : Any = """"""
UpperCAmelCase : List[str] = """"""
UpperCAmelCase : List[Any] = []
def _lowercase( self , A , A ) -> Union[str, Any]:
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
UpperCAmelCase : int = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
UpperCAmelCase : Tuple = self.__min_dist_top_down_dp(A , n - 1 )
UpperCAmelCase : str = self.__min_dist_top_down_dp(m - 1 , A )
UpperCAmelCase : List[Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
UpperCAmelCase : Optional[int] = 1 + min(A , A , A )
return self.dp[m][n]
def _lowercase( self , A , A ) -> List[str]:
UpperCAmelCase : List[Any] = worda
UpperCAmelCase : int = worda
UpperCAmelCase : str = [[-1 for _ in range(len(A ) )] for _ in range(len(A ) )]
return self.__min_dist_top_down_dp(len(A ) - 1 , len(A ) - 1 )
def _lowercase( self , A , A ) -> int:
UpperCAmelCase : str = worda
UpperCAmelCase : Any = worda
UpperCAmelCase : str = len(A )
UpperCAmelCase : Tuple = len(A )
UpperCAmelCase : Union[str, Any] = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
UpperCAmelCase : List[Any] = j
elif j == 0: # second string is empty
UpperCAmelCase : List[Any] = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
UpperCAmelCase : List[Any] = self.dp[i - 1][j - 1]
else:
UpperCAmelCase : int = self.dp[i][j - 1]
UpperCAmelCase : List[Any] = self.dp[i - 1][j]
UpperCAmelCase : Union[str, Any] = self.dp[i - 1][j - 1]
UpperCAmelCase : List[Any] = 1 + min(A , A , A )
return self.dp[m][n]
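# Both solvers run in O(m * n) time for words of length m and n; the top-down
# version memoizes into self.dp on demand, while the bottom-up version fills
# the whole (m + 1) x (n + 1) table from the empty-string base cases.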
if __name__ == "__main__":
a : Tuple = EditDistance()
print("""****************** Testing Edit Distance DP Algorithm ******************""")
print()
a : Optional[Any] = input("""Enter the first string: """).strip()
a : int = input("""Enter the second string: """).strip()
print()
print(F'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(F'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
| 708 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> np.array:
UpperCAmelCase : Optional[Any] = int(np.ceil((x_end - xa) / step_size ) )
UpperCAmelCase : str = np.zeros((n + 1,) )
UpperCAmelCase : Optional[Any] = ya
UpperCAmelCase : Union[str, Any] = xa
for k in range(_lowercase ):
UpperCAmelCase : Dict = y[k] + step_size * ode_func(_lowercase , y[k] )
UpperCAmelCase : Optional[int] = y[k] + (
(step_size / 2) * (ode_func(_lowercase , y[k] ) + ode_func(x + step_size , _lowercase ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
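# Minimal usage sketch (with a hypothetical name for the integrator above):
#   ys = heun(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)
#   ys[-1]  # ~2.71828, i.e. exp(1) for dy/dx = y, y(0) = 1 under Heun's method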
| 672 | 0 |
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a : Dict = logging.get_logger(__name__)
a : Optional[int] = """▁"""
a : str = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
a : int = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
a : Tuple = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
a : Optional[int] = {
"""ernie-m-base""": 5_1_4,
"""ernie-m-large""": 5_1_4,
}
a : Union[str, Any] = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class UpperCamelCase_ ( __A ):
lowercase = ["input_ids"]
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = RESOURCE_FILES_NAMES
def __init__( self , A , A=None , A=False , A="utf8" , A="[UNK]" , A="[SEP]" , A="[PAD]" , A="[CLS]" , A="[MASK]" , A = None , **A , ) -> List[str]:
# Mask token behaves like a normal word, i.e. includes the space before it and
# is included in the raw text, so there should be a match in a non-normalized sentence.
UpperCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , vocab_file=UpperCamelCase__ , encoding=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
UpperCAmelCase : str = do_lower_case
UpperCAmelCase : Optional[Any] = sentencepiece_model_ckpt
UpperCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
# to mimic the behaviour of paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
if vocab_file is not None:
UpperCAmelCase : Optional[int] = self.load_vocab(filepath=UpperCamelCase__ )
else:
UpperCAmelCase : Dict = {self.sp_model.id_to_piece(UpperCamelCase__ ): id for id in range(self.sp_model.get_piece_size() )}
UpperCAmelCase : Tuple = {v: k for k, v in self.vocab.items()}
def _lowercase( self , A ) -> Tuple:
if text is None:
return None
UpperCAmelCase : Optional[Any] = self.tokenize(UpperCamelCase__ )
UpperCAmelCase : Dict = '', []
for i, ch in enumerate(UpperCamelCase__ ):
if ch in self.SP_CHAR_MAPPING:
UpperCAmelCase : Any = self.SP_CHAR_MAPPING.get(UpperCamelCase__ )
else:
UpperCAmelCase : Tuple = unicodedata.normalize("""NFKC""" , UpperCamelCase__ )
if self.is_whitespace(UpperCamelCase__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(UpperCamelCase__ ) )
UpperCAmelCase : Tuple = normalized_text, [], 0
if self.do_lower_case:
UpperCAmelCase : int = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCAmelCase : Dict = token[1:]
UpperCAmelCase : List[str] = text[offset:].index(UpperCamelCase__ ) + offset
UpperCAmelCase : Tuple = start + len(UpperCamelCase__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCAmelCase : List[Any] = end
return token_mapping
@property
def _lowercase( self ) -> List[str]:
return len(self.vocab )
def _lowercase( self ) -> int:
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = self.__dict__.copy()
UpperCAmelCase : int = None
return state
def __setstate__( self , A ) -> List[str]:
UpperCAmelCase : List[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase : Tuple = {}
UpperCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _lowercase( self , A ) -> Optional[int]:
return "".join((self.SP_CHAR_MAPPING.get(UpperCamelCase__ , UpperCamelCase__ ) for c in text) )
def _lowercase( self , A , A=False , A=64 , A=0.1 ) -> Union[str, Any]:
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
UpperCAmelCase : int = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
UpperCAmelCase : Tuple = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
UpperCAmelCase : Dict = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
UpperCAmelCase : Optional[int] = self.sp_model.EncodeAsPieces(UpperCamelCase__ )
else:
UpperCAmelCase : Dict = self.sp_model.SampleEncodeAsPieces(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase : Any = []
for pi, piece in enumerate(UpperCamelCase__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(UpperCamelCase__ ) and pi != 0:
new_pieces.append(UpperCamelCase__ )
continue
else:
continue
UpperCAmelCase : Optional[Any] = 0
for i, chunk in enumerate(UpperCamelCase__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(UpperCamelCase__ ) or self.is_punct(UpperCamelCase__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(UpperCamelCase__ )
UpperCAmelCase : List[Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase : str = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase : List[Any] = i
if len(UpperCamelCase__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def _lowercase( self , A ) -> List[Any]:
UpperCAmelCase : Optional[int] = ''.join(UpperCamelCase__ ).replace(UpperCamelCase__ , """ """ ).strip()
return out_string
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : Dict = self.convert_ids_to_tokens(UpperCamelCase__ )
UpperCAmelCase : int = ''.join(UpperCamelCase__ ).replace(UpperCamelCase__ , """ """ ).strip()
return out_string
def _lowercase( self , A ) -> str:
return self.vocab.get(UpperCamelCase__ , self.vocab.get(self.unk_token ) )
def _lowercase( self , A ) -> Union[str, Any]:
return self.reverse_vocab.get(UpperCamelCase__ , self.unk_token )
def _lowercase( self , A , A=None ) -> Any:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Optional[int] = [self.cls_token_id]
UpperCAmelCase : Optional[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _lowercase( self , A , A=None ) -> Optional[Any]:
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _lowercase( self , A , A=None , A=False ) -> Tuple:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
def _lowercase( self , A , A = None ) -> int:
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(UpperCamelCase__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(UpperCamelCase__ ) + 1) + [1] * (len(UpperCamelCase__ ) + 3)
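# Worked example: for a 2-token first sequence and a 3-token second one, the
# pair layout [CLS] A [SEP] [SEP] B [SEP] gets (2 + 1) zeros followed by
# (3 + 3) ones -> [0, 0, 0, 1, 1, 1, 1, 1, 1], exactly as computed above.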
def _lowercase( self , A ) -> Optional[int]:
if "\u4e00" <= char <= "\u9fff":
return True
return False
def _lowercase( self , A ) -> List[str]:
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def _lowercase( self , A ) -> List[str]:
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def _lowercase( self , A ) -> List[str]:
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(UpperCamelCase__ ) == 1:
UpperCAmelCase : int = unicodedata.category(UpperCamelCase__ )
if cat == "Zs":
return True
return False
def _lowercase( self , A ) -> Dict:
UpperCAmelCase : Optional[Any] = {}
with io.open(UpperCamelCase__ , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(UpperCamelCase__ ):
UpperCAmelCase : Union[str, Any] = line.rstrip("""\n""" )
UpperCAmelCase : Tuple = int(UpperCamelCase__ )
return token_to_idx
def _lowercase( self , A , A = None ) -> List[str]:
UpperCAmelCase : Union[str, Any] = 0
if os.path.isdir(UpperCamelCase__ ):
UpperCAmelCase : int = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
UpperCAmelCase : List[Any] = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda A : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
UpperCAmelCase : str = token_index
writer.write(token + """\n""" )
index += 1
UpperCAmelCase : Optional[Any] = os.path.join(UpperCamelCase__ , """sentencepiece.bpe.model""" )
with open(UpperCamelCase__ , """wb""" ) as fi:
UpperCAmelCase : int = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (vocab_file,)
| 709 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
a : List[str] = logging.get_logger(__name__)
class UpperCamelCase_ ( __magic_name__ ):
def _lowercase( self , A ) -> Optional[int]:
if isinstance(A , A ):
UpperCAmelCase : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__( self , A , A , A ) -> str:
if len(A ) == 0 or len(A ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(A ) )
if isinstance(A , A ):
UpperCAmelCase : Tuple = [sequences]
UpperCAmelCase : Optional[Any] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(A )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__magic_name__ )
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A=ZeroShotClassificationArgumentHandler() , *A , **A ) -> Optional[int]:
UpperCAmelCase : Tuple = args_parser
super().__init__(*A , **A )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _lowercase( self ) -> List[Any]:
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _lowercase( self , A , A=True , A=True , A=TruncationStrategy.ONLY_FIRST , **A ) -> str:
UpperCAmelCase : Tuple = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
UpperCAmelCase : Any = self.tokenizer.eos_token
try:
UpperCAmelCase : Tuple = self.tokenizer(
A , add_special_tokens=A , return_tensors=A , padding=A , truncation=A , )
except Exception as e:
if "too short" in str(A ):
# tokenizers might complain that we want to truncate
# to a value the input never even reaches; in that
# case we don't want to truncate at all. There seems
# to be no better way to catch that exception.
UpperCAmelCase : List[str] = self.tokenizer(
A , add_special_tokens=A , return_tensors=A , padding=A , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _lowercase( self , **A ) -> Tuple:
if kwargs.get("""multi_class""" , A ) is not None:
UpperCAmelCase : Any = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
UpperCAmelCase : int = {}
if "candidate_labels" in kwargs:
UpperCAmelCase : Tuple = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
UpperCAmelCase : List[Any] = kwargs["""hypothesis_template"""]
UpperCAmelCase : Dict = {}
if "multi_label" in kwargs:
UpperCAmelCase : Union[str, Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self , A , *A , **A , ) -> Tuple:
if len(A ) == 0:
pass
elif len(A ) == 1 and "candidate_labels" not in kwargs:
UpperCAmelCase : Optional[Any] = args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(A , **A )
def _lowercase( self , A , A=None , A="This example is {}." ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : List[Any] = self._args_parser(A , A , A )
for i, (candidate_label, sequence_pair) in enumerate(zip(A , A ) ):
UpperCAmelCase : Any = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(A ) - 1,
**model_input,
}
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = inputs["""candidate_label"""]
UpperCAmelCase : Tuple = inputs["""sequence"""]
UpperCAmelCase : List[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
UpperCAmelCase : Tuple = self.model(**A )
UpperCAmelCase : Optional[int] = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _lowercase( self , A , A=False ) -> List[str]:
UpperCAmelCase : Dict = [outputs["""candidate_label"""] for outputs in model_outputs]
UpperCAmelCase : List[Any] = [outputs["""sequence"""] for outputs in model_outputs]
UpperCAmelCase : List[Any] = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
UpperCAmelCase : Optional[Any] = logits.shape[0]
UpperCAmelCase : int = len(A )
UpperCAmelCase : List[Any] = N // n
UpperCAmelCase : int = logits.reshape((num_sequences, n, -1) )
if multi_label or len(A ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
UpperCAmelCase : str = self.entailment_id
UpperCAmelCase : str = -1 if entailment_id == 0 else 0
UpperCAmelCase : Optional[Any] = reshaped_outputs[..., [contradiction_id, entailment_id]]
UpperCAmelCase : int = np.exp(A ) / np.exp(A ).sum(-1 , keepdims=A )
UpperCAmelCase : int = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
UpperCAmelCase : Dict = reshaped_outputs[..., self.entailment_id]
UpperCAmelCase : Optional[int] = np.exp(A ) / np.exp(A ).sum(-1 , keepdims=A )
UpperCAmelCase : int = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
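# Usage sketch (assuming the standard pipeline factory entry point):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification")
#   classifier("The CPU overheated again", candidate_labels=["hardware", "billing"])
# The result contains the sequence plus the candidate labels sorted by score.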
| 672 | 0 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def __lowerCamelCase ( _lowercase ) -> Dict:
return EnvironmentCommand()
class UpperCamelCase_ ( a__ ):
@staticmethod
def _lowercase( A ) -> int:
UpperCAmelCase : Dict = parser.add_parser("""env""" )
download_parser.set_defaults(func=lowerCamelCase_ )
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = huggingface_hub.__version__
UpperCAmelCase : Optional[Any] = """not installed"""
UpperCAmelCase : Any = """NA"""
if is_torch_available():
import torch
UpperCAmelCase : List[Any] = torch.__version__
UpperCAmelCase : Any = torch.cuda.is_available()
UpperCAmelCase : List[str] = """not installed"""
if is_transformers_available():
import transformers
UpperCAmelCase : List[str] = transformers.__version__
UpperCAmelCase : Optional[int] = """not installed"""
if is_accelerate_available():
import accelerate
UpperCAmelCase : Any = accelerate.__version__
UpperCAmelCase : str = """not installed"""
if is_xformers_available():
import xformers
UpperCAmelCase : str = xformers.__version__
UpperCAmelCase : Any = {
"""`diffusers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""Huggingface_hub version""": hub_version,
"""Transformers version""": transformers_version,
"""Accelerate version""": accelerate_version,
"""xFormers version""": xformers_version,
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(lowerCamelCase_ ) )
return info
@staticmethod
def _lowercase( A ) -> List[Any]:
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
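# This command backs `diffusers-cli env`: it gathers the version and platform
# table above and prints it in a copy-paste-ready form for GitHub issue reports.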
| 710 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a : List[Any] = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : int = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> List[str]:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = """</s>"""
UpperCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(A ) , 1103 )
def _lowercase( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _lowercase( self ) -> int:
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Union[str, Any] = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
UpperCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : List[Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
UpperCAmelCase : Any = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
UpperCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> int:
UpperCAmelCase : str = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
UpperCAmelCase : List[Any] = """To ensure a smooth flow of bank resolutions."""
UpperCAmelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
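# Spelled out, the id layout these asserts pin down: ids 0-3 map to <pad>,
# </s>, <mask_1> and <mask_2>; the remaining reserved ids up to 104 are
# <unk_N> placeholders; every underlying SentencePiece piece id is then
# shifted up by offset == 103, which is why <unk> lands at offset + 2 == 105.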
@require_torch
def _lowercase( self ) -> Any:
UpperCAmelCase : int = ["""This is going to be way too long.""" * 150, """short example"""]
UpperCAmelCase : Optional[int] = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def _lowercase( self ) -> List[str]:
# fmt: off
UpperCAmelCase : List[str] = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : int = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> str:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Any = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : str = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
UpperCAmelCase : List[str] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : str = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = ["""This is going to be way too long.""" * 1000, """short example"""]
UpperCAmelCase : Any = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : int = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
UpperCAmelCase : Optional[Any] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
| 672 | 0 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase , _lowercase ) -> Tuple:
if not (isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase )):
raise ValueError("""longest_common_substring() takes two strings for inputs""" )
UpperCAmelCase : Optional[Any] = len(_lowercase )
UpperCAmelCase : Optional[Any] = len(_lowercase )
UpperCAmelCase : Optional[int] = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
UpperCAmelCase : Tuple = 0
UpperCAmelCase : List[Any] = 0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
UpperCAmelCase : Tuple = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
UpperCAmelCase : Optional[int] = i
UpperCAmelCase : Union[str, Any] = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
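# A readable sketch of the same dynamic programme with distinct names (the
# obfuscated version above collapses both inputs into a single identifier):
# dp[i][j] holds the length of the longest common suffix of text1[:i] and
# text2[:j], and the answer is the best suffix seen anywhere in the table.
def lcs_substring(text1: str, text2: str) -> str:
    dp = [[0] * (len(text2) + 1) for _ in range(len(text1) + 1)]
    best_len, best_end = 0, 0
    for i in range(1, len(text1) + 1):
        for j in range(1, len(text2) + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
                if dp[i][j] > best_len:
                    best_len, best_end = dp[i][j], i
    return text1[best_end - best_len : best_end]

assert lcs_substring("abcdxyz", "xyzabcd") == "abcd"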
| 711 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=32 , A=3 , A=4 , A=[10, 20, 30, 40] , A=[2, 2, 3, 2] , A=True , A=True , A=37 , A="gelu" , A=10 , A=0.0_2 , A=["stage2", "stage3", "stage4"] , A=[2, 3, 4] , A=None , ) -> int:
UpperCAmelCase : str = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Dict = image_size
UpperCAmelCase : Tuple = num_channels
UpperCAmelCase : Union[str, Any] = num_stages
UpperCAmelCase : Any = hidden_sizes
UpperCAmelCase : str = depths
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : List[str] = num_labels
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Optional[Any] = out_features
UpperCAmelCase : List[str] = out_indices
UpperCAmelCase : Any = scope
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def _lowercase( self ) -> Optional[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=A , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowercase( self , A , A , A ) -> Optional[Any]:
UpperCAmelCase : int = ConvNextVaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : List[str] = ConvNextVaForImageClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : Optional[Any] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Any = model(A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase : Any = None
UpperCAmelCase : Optional[int] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : List[str] = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Dict = ConvNextVaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def _lowercase( self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase( self ) -> List[str]:
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def _lowercase( self ) -> Dict:
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def _lowercase( self ) -> Any:
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def _lowercase( self ) -> int:
pass
def _lowercase( self ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : Optional[int] = True
if model_class.__name__ in [
*get_values(A ),
*get_values(A ),
]:
continue
UpperCAmelCase : Any = model_class(A )
model.to(A )
model.train()
UpperCAmelCase : List[str] = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : List[str] = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : List[str] = False
UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(A ), *get_values(A )]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase : Dict = model_class(A )
model.to(A )
model.gradient_checkpointing_enable()
model.train()
UpperCAmelCase : Any = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : Any = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(A )
UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Tuple = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> List[str]:
def check_hidden_states_output(A , A , A ):
UpperCAmelCase : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(A , A ) )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(A ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : str = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : int = True
check_hidden_states_output(A , A , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def _lowercase( self ) -> Any:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = ConvNextVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def __lowerCamelCase ( ) -> Optional[int]:
UpperCAmelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def _lowercase( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Any = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(A )
UpperCAmelCase : List[Any] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : Tuple = preprocessor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**A )
# verify the logits
UpperCAmelCase : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A )
UpperCAmelCase : Dict = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
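# A minimal sketch of the inference path the slow test above exercises. It
# assumes the public "facebook/convnextv2-tiny-1k-224" checkpoint plus
# network access, so it is guarded against running at import time.
if __name__ == "__main__":
    from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
    model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (1, 1000) ImageNet logits
    print(model.config.id2label[logits.argmax(-1).item()])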
| 672 | 0 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
a : str = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def __lowerCamelCase ( _lowercase=True ) -> Any:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__UpperCAmelCase ) )
class UpperCamelCase_ ( __UpperCAmelCase ):
lowercase = None
lowercase = None
def _lowercase( self , A , A ) -> str:
with TemporaryDirectory() as tmp_dir:
UpperCAmelCase : str = dataset_module_factory(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ )
UpperCAmelCase : Dict = import_main_class(dataset_module.module_path , dataset=lowerCAmelCase_ )
UpperCAmelCase : Union[str, Any] = builder_cls(
cache_dir=lowerCAmelCase_ , config_name=lowerCAmelCase_ , hash=dataset_module.hash , )
UpperCAmelCase : Optional[Any] = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=lowerCAmelCase_ ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
UpperCAmelCase : Any = cached_path(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ )
self.assertTrue(os.path.exists(lowerCAmelCase_ ) )
@pytest.mark.integration
def __lowerCamelCase ( _lowercase ) -> str:
UpperCAmelCase : Tuple = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
UpperCAmelCase : List[str] = dataset_module_factory("""wikipedia""" , cache_dir=__lowerCAmelCase )
UpperCAmelCase : Union[str, Any] = import_main_class(dataset_module.module_path )
UpperCAmelCase : Optional[int] = builder_cls(
cache_dir=__lowerCAmelCase , config_name="""20220301.frr""" , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
UpperCAmelCase : int = None
builder_instance.download_and_prepare()
UpperCAmelCase : int = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __lowerCamelCase ( _lowercase ) -> List[str]:
UpperCAmelCase : int = dataset_module_factory("""wikipedia""" , cache_dir=__lowerCAmelCase )
UpperCAmelCase : Optional[Any] = import_main_class(dataset_module.module_path , dataset=__lowerCAmelCase )
UpperCAmelCase : Optional[int] = builder_cls(
cache_dir=__lowerCAmelCase , config_name="""20220301.frr""" , hash=dataset_module.hash , )
UpperCAmelCase : Any = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert "train" in ds
assert isinstance(ds["""train"""] , __lowerCAmelCase )
assert next(iter(ds["""train"""] ) )
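# For reference, the same streaming behaviour through the public API
# (dataset and config name are illustrative; network access is required):
#
#   from datasets import load_dataset
#   ds = load_dataset("wikipedia", "20220301.frr", streaming=True)
#   print(next(iter(ds["train"])))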
| 712 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
a : str = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
a : Dict = """
Args:
predictions (`list` of `float`): Predicted values, as returned by a model.
references (`list` of `float`): Ground truth values.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
a : Optional[int] = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def _lowercase( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def _lowercase( self , A , A , A=False ) -> int:
if return_pvalue:
UpperCAmelCase : int = pearsonr(A , A )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(A , A )[0] )}
| 672 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a : Tuple = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
a : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
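# A minimal sketch of the lazy-import idea behind `_LazyModule`
# (illustrative only -- the real class also validates the import structure
# and handles submodules). PEP 562 module-level `__getattr__` defers the
# heavy import until the attribute is first accessed:
#
#   import importlib
#
#   _LAZY_ATTRS = {"Wav2Vec2PhonemeCTCTokenizer": ".tokenization_wav2vec2_phoneme"}
#
#   def __getattr__(name):
#       if name in _LAZY_ATTRS:
#           module = importlib.import_module(_LAZY_ATTRS[name], __package__)
#           return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")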
| 713 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __lowerCamelCase ( _lowercase , _lowercase ) -> str | Literal[False]:
UpperCAmelCase : Optional[int] = list(_lowercase )
UpperCAmelCase : Dict = list(_lowercase )
UpperCAmelCase : str = 0
for i in range(len(_lowercase ) ):
if lista[i] != lista[i]:
count += 1
UpperCAmelCase : Optional[Any] = """_"""
if count > 1:
return False
else:
return "".join(_lowercase )
def __lowerCamelCase ( _lowercase ) -> list[str]:
UpperCAmelCase : List[str] = []
while True:
UpperCAmelCase : Optional[int] = ["""$"""] * len(_lowercase )
UpperCAmelCase : int = []
for i in range(len(_lowercase ) ):
for j in range(i + 1 , len(_lowercase ) ):
UpperCAmelCase : str = compare_string(binary[i] , binary[j] )
if k is False:
UpperCAmelCase : Union[str, Any] = """*"""
UpperCAmelCase : Optional[Any] = """*"""
temp.append("""X""" )
for i in range(len(_lowercase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_lowercase ) == 0:
return pi
UpperCAmelCase : List[Any] = list(set(_lowercase ) )
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[str]:
UpperCAmelCase : Dict = []
for minterm in minterms:
UpperCAmelCase : List[str] = """"""
for _ in range(_lowercase ):
UpperCAmelCase : Dict = str(minterm % 2 ) + string
minterm //= 2
temp.append(_lowercase )
return temp
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> bool:
UpperCAmelCase : Optional[int] = list(_lowercase )
UpperCAmelCase : Dict = list(_lowercase )
UpperCAmelCase : Dict = 0
for i in range(len(_lowercase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[str]:
UpperCAmelCase : Tuple = []
UpperCAmelCase : Optional[int] = [0] * len(_lowercase )
for i in range(len(chart[0] ) ):
UpperCAmelCase : Any = 0
UpperCAmelCase : Optional[Any] = -1
for j in range(len(_lowercase ) ):
if chart[j][i] == 1:
count += 1
UpperCAmelCase : str = j
if count == 1:
UpperCAmelCase : Optional[int] = 1
for i in range(len(_lowercase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_lowercase ) ):
UpperCAmelCase : List[str] = 0
temp.append(prime_implicants[i] )
while True:
UpperCAmelCase : int = 0
UpperCAmelCase : Tuple = -1
UpperCAmelCase : Union[str, Any] = 0
for i in range(len(_lowercase ) ):
UpperCAmelCase : Optional[Any] = chart[i].count(1 )
if count_n > max_n:
UpperCAmelCase : Union[str, Any] = count_n
UpperCAmelCase : Optional[Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_lowercase ) ):
UpperCAmelCase : Optional[Any] = 0
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[list[int]]:
UpperCAmelCase : Optional[int] = [[0 for x in range(len(_lowercase ) )] for x in range(len(_lowercase ) )]
for i in range(len(_lowercase ) ):
UpperCAmelCase : Tuple = prime_implicants[i].count("""_""" )
for j in range(len(_lowercase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _lowercase ):
UpperCAmelCase : List[Any] = 1
return chart
def __lowerCamelCase ( ) -> None:
UpperCAmelCase : str = int(input("""Enter the no. of variables\n""" ) )
UpperCAmelCase : List[Any] = [
int(x )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
UpperCAmelCase : str = decimal_to_binary(_lowercase , _lowercase )
UpperCAmelCase : Tuple = check(_lowercase )
print("""Prime Implicants are:""" )
print(_lowercase )
UpperCAmelCase : Union[str, Any] = prime_implicant_chart(_lowercase , _lowercase )
UpperCAmelCase : Tuple = selection(_lowercase , _lowercase )
print("""Essential Prime Implicants are:""" )
print(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
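# Worked sketch of the core merge rule implemented by the first helper above:
# two implicants that differ in exactly one bit position combine, with the
# differing position replaced by "_"; implicants that differ in two or more
# positions do not combine. A de-obfuscated one-liner of that rule (names
# are illustrative):
#
#   def merge(a: str, b: str) -> str | None:
#       diff = [i for i in range(len(a)) if a[i] != b[i]]
#       return a[: diff[0]] + "_" + a[diff[0] + 1 :] if len(diff) == 1 else None
#
#   assert merge("0110", "0100") == "01_0"
#   assert merge("0110", "0011") is None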
| 672 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = None , ) -> List[str]:
UpperCAmelCase : Tuple = {}
if train_file is not None:
UpperCAmelCase : List[Any] = [train_file]
if eval_file is not None:
UpperCAmelCase : Union[str, Any] = [eval_file]
if test_file is not None:
UpperCAmelCase : Optional[Any] = [test_file]
UpperCAmelCase : Optional[Any] = datasets.load_dataset("""csv""" , data_files=UpperCAmelCase__ )
UpperCAmelCase : Any = list(ds[list(files.keys() )[0]].features.keys() )
UpperCAmelCase : List[Any] = features_name.pop(UpperCAmelCase__ )
UpperCAmelCase : Any = list(set(ds[list(files.keys() )[0]][label_name] ) )
UpperCAmelCase : Tuple = {label: i for i, label in enumerate(UpperCAmelCase__ )}
UpperCAmelCase : List[Any] = tokenizer.model_input_names
UpperCAmelCase : Union[str, Any] = {}
if len(UpperCAmelCase__ ) == 1:
for k in files.keys():
UpperCAmelCase : Tuple = ds[k].map(
lambda _lowercase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding="""max_length""" ) , batched=UpperCAmelCase__ , )
elif len(UpperCAmelCase__ ) == 2:
for k in files.keys():
UpperCAmelCase : Optional[int] = ds[k].map(
lambda _lowercase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding="""max_length""" , ) , batched=UpperCAmelCase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
UpperCAmelCase : List[str] = {k: v for k, v in ex.items() if k in input_names}
UpperCAmelCase : Tuple = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
UpperCAmelCase : str = {k: v for k, v in ex.items() if k in input_names}
UpperCAmelCase : List[str] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
UpperCAmelCase : Dict = {k: v for k, v in ex.items() if k in input_names}
UpperCAmelCase : Tuple = labelaid[ex[label_name]]
yield (d, label)
UpperCAmelCase : Any = (
tf.data.Dataset.from_generator(
UpperCAmelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
UpperCAmelCase : Dict = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
UpperCAmelCase : Any = (
tf.data.Dataset.from_generator(
UpperCAmelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
UpperCAmelCase : Optional[Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
UpperCAmelCase : int = (
tf.data.Dataset.from_generator(
UpperCAmelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
UpperCAmelCase : Optional[Any] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
a : Tuple = logging.getLogger(__name__)
@dataclass
class UpperCamelCase_ :
lowercase = field(metadata={'help': 'Which column contains the label'} )
lowercase = field(default=_a , metadata={'help': 'The path of the training file'} )
lowercase = field(default=_a , metadata={'help': 'The path of the development file'} )
lowercase = field(default=_a , metadata={'help': 'The path of the test file'} )
lowercase = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase = field(
default=_a , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class UpperCamelCase_ :
lowercase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowercase = field(
default=_a , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase = field(
default=_a , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowercase = field(default=_a , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowercase = field(
default=_a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
def __lowerCamelCase ( ) -> List[Any]:
UpperCAmelCase : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
F'''16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCAmelCase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCAmelCase__ ) , labelaid=UpperCAmelCase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
UpperCAmelCase : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(_lowercase ) -> Dict:
UpperCAmelCase : Any = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
UpperCAmelCase : Optional[Any] = TFTrainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCAmelCase : int = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCAmelCase : Any = trainer.evaluate()
UpperCAmelCase : Tuple = os.path.join(training_args.output_dir , """eval_results.txt""" )
with open(UpperCAmelCase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
results.update(UpperCAmelCase__ )
return results
if __name__ == "__main__":
main()
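# Example invocation of this script (file paths are illustrative; the CSVs
# must share a label column plus one or two text columns):
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv \
#       --label_column_id 0 \
#       --output_dir ./out --do_train --do_eval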
| 714 |
'''simple docstring'''
a : Tuple = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : str = 0
while number:
# Speed is increased slightly by looking up five digits at a time in the precomputed table.
sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
number //= 1_0_0_0_0_0
return sum_of_digits_squared
# Every chain eventually enters one of two cycles:
# one cycle ends at 89 (declaring chain member 58 first minimizes the number
# of iterations needed to check all members), and the other ends at 1 and
# contains only the single element 1.
# So 58 and 1 are the values seeded at the start.
# A flat array replaces the original dictionary to speed up memoized lookups.
a : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
a : Optional[Any] = True
a : List[Any] = False
def __lowerCamelCase ( _lowercase ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
UpperCAmelCase : List[str] = chain(next_number(_lowercase ) )
UpperCAmelCase : Tuple = number_chain
while number < 1_0_0_0_0_0_0_0:
UpperCAmelCase : List[str] = number_chain
number *= 1_0
return number_chain
def __lowerCamelCase ( _lowercase = 1_0_0_0_0_0_0_0 ) -> int:
for i in range(1 , _lowercase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
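# Quick sanity check of the chain logic for single start values: the classic
# example 44 -> 32 -> 13 -> 10 -> 1 terminates at 1, while 85 -> 89 enters
# the 89 cycle.
def follow(n: int) -> int:
    while n not in (1, 89):
        n = sum(int(d) ** 2 for d in str(n))
    return n

assert follow(44) == 1 and follow(85) == 89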
| 672 | 0 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Tuple:
# Initialise PyTorch model
UpperCAmelCase : str = RemBertConfig.from_json_file(UpperCAmelCase__ )
print("""Building PyTorch model from configuration: {}""".format(str(UpperCAmelCase__ ) ) )
UpperCAmelCase : Tuple = RemBertModel(UpperCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(UpperCAmelCase__ ) )
torch.save(model.state_dict() , UpperCAmelCase__ )
if __name__ == "__main__":
a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
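# Example invocation (file paths are illustrative):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/model.ckpt \
#       --rembert_config_file /path/to/rembert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin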
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : Optional[Any] = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
a : List[Any] = logging.get_logger(__name__)
a : Optional[int] = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = """layoutlmv3"""
def __init__( self , A=50265 , A=768 , A=12 , A=12 , A=3072 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=2 , A=0.0_2 , A=1e-5 , A=1 , A=0 , A=2 , A=1024 , A=128 , A=128 , A=True , A=32 , A=128 , A=64 , A=256 , A=True , A=True , A=True , A=224 , A=3 , A=16 , A=None , **A , ) -> int:
super().__init__(
vocab_size=A , hidden_size=A , num_hidden_layers=A , num_attention_heads=A , intermediate_size=A , hidden_act=A , hidden_dropout_prob=A , attention_probs_dropout_prob=A , max_position_embeddings=A , type_vocab_size=A , initializer_range=A , layer_norm_eps=A , pad_token_id=A , bos_token_id=A , eos_token_id=A , **A , )
UpperCAmelCase : Optional[int] = max_ad_position_embeddings
UpperCAmelCase : Optional[Any] = coordinate_size
UpperCAmelCase : int = shape_size
UpperCAmelCase : str = has_relative_attention_bias
UpperCAmelCase : Optional[int] = rel_pos_bins
UpperCAmelCase : List[str] = max_rel_pos
UpperCAmelCase : Tuple = has_spatial_attention_bias
UpperCAmelCase : Union[str, Any] = rel_ad_pos_bins
UpperCAmelCase : Optional[Any] = max_rel_ad_pos
UpperCAmelCase : Optional[int] = text_embed
UpperCAmelCase : Union[str, Any] = visual_embed
UpperCAmelCase : str = input_size
UpperCAmelCase : Dict = num_channels
UpperCAmelCase : List[str] = patch_size
UpperCAmelCase : int = classifier_dropout
class UpperCamelCase_ ( __magic_name__ ):
lowercase = version.parse('1.12' )
@property
def _lowercase( self ) -> Any:
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def _lowercase( self ) -> int:
return 1e-5
@property
def _lowercase( self ) -> str:
return 12
def _lowercase( self , A , A = -1 , A = -1 , A = False , A = None , A = 3 , A = 40 , A = 40 , ) -> Union[str, Any]:
setattr(processor.image_processor , """apply_ocr""" , A )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase : Any = compute_effective_axis_dimension(
A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase : Optional[int] = processor.tokenizer.num_special_tokens_to_add(A )
UpperCAmelCase : Dict = compute_effective_axis_dimension(
A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase : Union[str, Any] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCAmelCase : List[str] = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCAmelCase : Tuple = self._generate_dummy_images(A , A , A , A )
UpperCAmelCase : Optional[Any] = dict(
processor(
A , text=A , boxes=A , return_tensors=A , ) )
return inputs
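# Note on the dummy inputs above: LayoutLM-family models expect bounding
# boxes in a 0-1000 normalized coordinate space, so the fixed box
# [48, 84, 73, 128] is a valid placeholder regardless of image size. A
# sketch of the usual normalization (illustrative helper, not part of this
# config class):
#
#   def normalize_box(box, width, height):
#       return [
#           int(1000 * box[0] / width),
#           int(1000 * box[1] / height),
#           int(1000 * box[2] / width),
#           int(1000 * box[3] / height),
#       ]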
| 716 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def __lowerCamelCase ( _lowercase , _lowercase = True , _lowercase = math.inf , _lowercase = -math.inf , _lowercase = math.inf , _lowercase = -math.inf , _lowercase = False , _lowercase = 1_0_0 , _lowercase = 0.01 , _lowercase = 1 , ) -> Any:
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : Any = search_prob
UpperCAmelCase : Any = start_temperate
UpperCAmelCase : Optional[Any] = []
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : Optional[Any] = None
while not search_end:
UpperCAmelCase : List[str] = current_state.score()
if best_state is None or current_score > best_state.score():
UpperCAmelCase : List[Any] = current_state
scores.append(_lowercase )
iterations += 1
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
UpperCAmelCase : int = random.randint(0 , len(_lowercase ) - 1 ) # picking a random neighbor
UpperCAmelCase : int = neighbors.pop(_lowercase )
UpperCAmelCase : Tuple = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
UpperCAmelCase : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
UpperCAmelCase : int = picked_neighbor
else:
UpperCAmelCase : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
UpperCAmelCase : Optional[int] = picked_neighbor
UpperCAmelCase : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
UpperCAmelCase : Optional[int] = True
else:
UpperCAmelCase : Optional[int] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(_lowercase ) , _lowercase )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
if __name__ == "__main__":
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a : Dict = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
a : List[str] = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]:
return (3 * x**2) - (6 * y)
a : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a : Any = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
a : List[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
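# The acceptance rule above in isolation: a move that worsens the score by
# `change` (negative) is accepted with probability e^(change / T), so hotter
# temperatures accept bad moves more readily.
for temp in (100.0, 10.0, 1.0):
    p_accept = math.e ** (-5.0 / temp)  # a move that worsens the score by 5
    print(f"T={temp}: accept probability {p_accept:.3f}")
# T=100.0 -> ~0.951, T=10.0 -> ~0.607, T=1.0 -> ~0.007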
| 672 | 0 |
'''simple docstring'''
import argparse
import json
import subprocess
def __lowerCamelCase ( _lowercase , _lowercase ) -> Tuple:
UpperCAmelCase : int = []
UpperCAmelCase : Optional[Any] = (
F'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'''
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
UpperCAmelCase : Optional[Any] = subprocess.run(_lowercase , shell=_lowercase , stdout=subprocess.PIPE )
UpperCAmelCase : Dict = output.stdout.decode("""utf-8""" )
UpperCAmelCase : Dict = json.loads(_lowercase )
UpperCAmelCase : Any = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_lowercase )
# save the result so we can report them on Slack
with open("""offline_runners.txt""" , """w""" ) as fp:
fp.write(json.dumps(_lowercase ) )
if len(_lowercase ) > 0:
UpperCAmelCase : int = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(F'''The following runners are offline:\n{failed}''' )
if __name__ == "__main__":
def __lowerCamelCase ( _lowercase ) -> Any:
return values.split(""",""" )
a : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
a : Union[str, Any] = parser.parse_args()
get_runner_status(args.target_runners, args.token)
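# Example invocation (runner names and the token are placeholders; the
# script filename is illustrative):
#
#   python check_offline_runners.py \
#       --target_runners runner-1,runner-2 \
#       --token $GITHUB_TOKEN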
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a : Any = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 0 |
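# Minimal sketch of the lazy-import pattern the __init__ above relies on. This
# assumes (per _LazyModule's documented behavior) that submodules are imported
# only on first attribute access; the toy class below is illustrative, not the
# real implementation:
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [names]} into {name: submodule} for O(1) lookup
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        # only triggered for attributes not found through normal lookup
        if attr not in self._class_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)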
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 1_0)
def list_field(default=None , metadata=None ):
return field(default_factory=lambda: default , metadata=metadata )
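# Why the default_factory indirection in list_field above: dataclasses forbid
# shared mutable defaults, so each instance must get a fresh list. Hedged
# stand-alone illustration:
from dataclasses import dataclass, field


@dataclass
class _Cfg:
    tags: list = field(default_factory=list)  # a fresh list per instance


_a, _b = _Cfg(), _Cfg()
_a.tags.append("x")
assert _b.tags == []  # no cross-instance leakage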
@dataclass
class BasicExample:
foo: int
bar: float
baz: str
flag: bool
@dataclass
class WithDefaultExample:
foo: int = 42
baz: str = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class WithDefaultBoolExample:
foo: bool = False
baz: bool = True
opt: Optional[bool] = None
class BasicEnum(Enum):
titi = 'titi'
toto = 'toto'
class MixedTypeEnum(Enum):
titi = 'titi'
toto = 'toto'
fourtytwo = 42
@dataclass
class EnumExample:
foo: BasicEnum = 'toto'
def __post_init__(self ):
self.foo = BasicEnum(self.foo )
@dataclass
class MixedTypeEnumExample:
foo: MixedTypeEnum = 'toto'
def __post_init__(self ):
self.foo = MixedTypeEnum(self.foo )
@dataclass
class OptionalExample:
foo: Optional[int] = None
bar: Optional[float] = field(default=None , metadata={'help': 'help message'} )
baz: Optional[str] = None
ces: Optional[List[str]] = list_field(default=[] )
des: Optional[List[int]] = list_field(default=[] )
@dataclass
class ListExample:
foo_int: List[int] = list_field(default=[] )
bar_int: List[int] = list_field(default=[1, 2, 3] )
foo_str: List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class RequiredExample:
required_list: List[int] = field()
required_str: str = field()
required_enum: BasicEnum = field()
def __post_init__(self ):
self.required_enum = BasicEnum(self.required_enum )
@dataclass
class StringLiteralAnnotationExample:
foo: int
required_enum: "BasicEnum" = field()
opt: "Optional[bool]" = None
baz: "str" = field(default='toto' , metadata={'help': 'help message'} )
foo_str: "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class WithDefaultBoolExamplePep604:
foo: bool = False
baz: bool = True
opt: bool | None = None
@dataclass
class OptionalExamplePep604:
foo: int | None = None
bar: float | None = field(default=None , metadata={'help': 'help message'} )
baz: str | None = None
ces: list[str] | None = list_field(default=[] )
des: list[int] | None = list_field(default=[] )
class HfArgumentParserTest(unittest.TestCase):
def argparsersEqual( self , a , b ):
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
xx = {k: v for k, v in vars(x ).items() if k != '''container'''}
yy = {k: v for k, v in vars(y ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , None ) and yy.get("""choices""" , None ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](expected_choice ) , yy["""type"""](expected_choice ) )
del xx["type"], yy["type"]
self.assertEqual(xx , yy )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Dict = HfArgumentParser(lowerCamelCase__ )
UpperCAmelCase : Any = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowerCamelCase__ , required=lowerCamelCase__ )
expected.add_argument("""--bar""" , type=lowerCamelCase__ , required=lowerCamelCase__ )
expected.add_argument("""--baz""" , type=lowerCamelCase__ , required=lowerCamelCase__ )
expected.add_argument("""--flag""" , type=lowerCamelCase__ , default=lowerCamelCase__ , const=lowerCamelCase__ , nargs="""?""" )
self.argparsersEqual(lowerCamelCase__ , lowerCamelCase__ )
args = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
(example,) = parser.parse_args_into_dataclasses(args , look_for_args_file=False )
self.assertFalse(example.flag )
def _lowercase( self ) -> Any:
UpperCAmelCase : Optional[Any] = HfArgumentParser(lowerCamelCase__ )
UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=lowerCamelCase__ )
expected.add_argument("""--baz""" , default="""toto""" , type=lowerCamelCase__ , help="""help message""" )
self.argparsersEqual(lowerCamelCase__ , lowerCamelCase__ )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowerCamelCase__ , default=lowerCamelCase__ , const=lowerCamelCase__ , nargs="""?""" )
expected.add_argument("""--baz""" , type=lowerCamelCase__ , default=lowerCamelCase__ , const=lowerCamelCase__ , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=lowerCamelCase__ , dest="""baz""" )
expected.add_argument("""--opt""" , type=lowerCamelCase__ , default=lowerCamelCase__ )
UpperCAmelCase : List[str] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowerCamelCase__ )
for dataclass_type in dataclass_types:
UpperCAmelCase : List[str] = HfArgumentParser(lowerCamelCase__ )
self.argparsersEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase : Tuple = parser.parse_args([] )
self.assertEqual(lowerCamelCase__ , Namespace(foo=lowerCamelCase__ , baz=lowerCamelCase__ , opt=lowerCamelCase__ ) )
UpperCAmelCase : Any = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(lowerCamelCase__ , Namespace(foo=lowerCamelCase__ , baz=lowerCamelCase__ , opt=lowerCamelCase__ ) )
UpperCAmelCase : Tuple = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(lowerCamelCase__ , Namespace(foo=lowerCamelCase__ , baz=lowerCamelCase__ , opt=lowerCamelCase__ ) )
UpperCAmelCase : List[Any] = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(lowerCamelCase__ , Namespace(foo=lowerCamelCase__ , baz=lowerCamelCase__ , opt=lowerCamelCase__ ) )
UpperCAmelCase : int = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(lowerCamelCase__ , Namespace(foo=lowerCamelCase__ , baz=lowerCamelCase__ , opt=lowerCamelCase__ ) )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : int = HfArgumentParser(lowerCamelCase__ )
UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase : Dict = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
UpperCAmelCase : Optional[int] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
UpperCAmelCase : str = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
UpperCAmelCase : int = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
UpperCAmelCase : int = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
UpperCAmelCase : Union[str, Any] = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _lowercase( self ) -> Union[str, Any]:
@dataclass
class UpperCamelCase_ :
lowercase = "toto"
UpperCAmelCase : str = HfArgumentParser(lowerCamelCase__ )
UpperCAmelCase : Tuple = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase : Union[str, Any] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
UpperCAmelCase : Optional[Any] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
UpperCAmelCase : List[Any] = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Tuple = HfArgumentParser(lowerCamelCase__ )
UpperCAmelCase : Any = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=lowerCamelCase__ )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=lowerCamelCase__ )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=lowerCamelCase__ )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=lowerCamelCase__ )
self.argparsersEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase : Optional[int] = parser.parse_args([] )
self.assertEqual(
lowerCamelCase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
UpperCAmelCase : List[Any] = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(lowerCamelCase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Dict = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=lowerCamelCase__ , type=lowerCamelCase__ )
expected.add_argument("""--bar""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""help message""" )
expected.add_argument("""--baz""" , default=lowerCamelCase__ , type=lowerCamelCase__ )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=lowerCamelCase__ )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=lowerCamelCase__ )
UpperCAmelCase : Any = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowerCamelCase__ )
for dataclass_type in dataclass_types:
UpperCAmelCase : Tuple = HfArgumentParser(lowerCamelCase__ )
self.argparsersEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase : Optional[Any] = parser.parse_args([] )
self.assertEqual(lowerCamelCase__ , Namespace(foo=lowerCamelCase__ , bar=lowerCamelCase__ , baz=lowerCamelCase__ , ces=[] , des=[] ) )
UpperCAmelCase : List[str] = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(lowerCamelCase__ , Namespace(foo=12 , bar=3.1_4 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : str = HfArgumentParser(lowerCamelCase__ )
UpperCAmelCase : int = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=lowerCamelCase__ , required=lowerCamelCase__ )
expected.add_argument("""--required_str""" , type=lowerCamelCase__ , required=lowerCamelCase__ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=lowerCamelCase__ , )
self.argparsersEqual(lowerCamelCase__ , lowerCamelCase__ )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : List[str] = HfArgumentParser(lowerCamelCase__ )
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowerCamelCase__ , required=lowerCamelCase__ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=lowerCamelCase__ , )
expected.add_argument("""--opt""" , type=lowerCamelCase__ , default=lowerCamelCase__ )
expected.add_argument("""--baz""" , default="""toto""" , type=lowerCamelCase__ , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=lowerCamelCase__ )
self.argparsersEqual(lowerCamelCase__ , lowerCamelCase__ )
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Any = HfArgumentParser(lowerCamelCase__ )
UpperCAmelCase : int = {
'''foo''': 12,
'''bar''': 3.1_4,
'''baz''': '''42''',
'''flag''': True,
}
UpperCAmelCase : List[Any] = parser.parse_dict(lowerCamelCase__ )[0]
UpperCAmelCase : Union[str, Any] = BasicExample(**lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def _lowercase( self ) -> str:
UpperCAmelCase : Dict = HfArgumentParser(lowerCamelCase__ )
UpperCAmelCase : Union[str, Any] = {
'''foo''': 12,
'''bar''': 3.1_4,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(lowerCamelCase__ , parser.parse_dict , lowerCamelCase__ , allow_extra_keys=lowerCamelCase__ )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Optional[Any] = HfArgumentParser(lowerCamelCase__ )
UpperCAmelCase : Any = {
'''foo''': 12,
'''bar''': 3.1_4,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : int = os.path.join(lowerCamelCase__ , """temp_json""" )
os.mkdir(lowerCamelCase__ )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase : Dict = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
UpperCAmelCase : Optional[int] = BasicExample(**lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def _lowercase( self ) -> Any:
UpperCAmelCase : Dict = HfArgumentParser(lowerCamelCase__ )
UpperCAmelCase : Optional[Any] = {
'''foo''': 12,
'''bar''': 3.1_4,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : Optional[Any] = os.path.join(lowerCamelCase__ , """temp_yaml""" )
os.mkdir(lowerCamelCase__ )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
UpperCAmelCase : Union[str, Any] = BasicExample(**lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def _lowercase( self ) -> Any:
UpperCAmelCase : Optional[Any] = HfArgumentParser(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
| 718 |
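# Hedged end-to-end sketch of the pattern the tests above exercise: declare a
# dataclass, hand it to HfArgumentParser, and parse CLI-style arguments back
# into a typed instance (the field names here are illustrative):
from dataclasses import dataclass

from transformers import HfArgumentParser


@dataclass
class _DemoArgs:
    foo: int
    bar: float = 3.14


(_demo,) = HfArgumentParser(_DemoArgs).parse_args_into_dataclasses(["--foo", "12"])
assert _demo.foo == 12 and _demo.bar == 3.14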
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a : Tuple = False
class UpperCamelCase_ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def test_inference_image_variations( self ):
pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=True )
image_prompt = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
generator = torch.manual_seed(0 )
image = pipe(
image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
image_slice = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 672 | 0 |
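# The test above uses the "expected slice" idiom common in diffusers tests:
# compare a small deterministic window of the generated image against pinned
# values. Hedged minimal form of that assertion as a reusable helper:
import numpy as np


def assert_slice_close(actual: np.ndarray, expected: np.ndarray, atol: float = 1e-2) -> None:
    assert np.abs(actual.flatten() - expected.flatten()).max() < atol


assert_slice_close(np.array([0.044, 0.047]), np.array([0.0441, 0.0469]))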
'''simple docstring'''
import os
import pytest
from attr import dataclass
a : Optional[Any] = 'us-east-1' # defaults region
@dataclass
class SageMakerTestEnvironment:
framework: str
role = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
hyperparameters = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 500,
'save_steps': 5_500,
}
distributed_hyperparameters = {**hyperparameters, 'max_steps': 1_000}
@property
def metric_definitions( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def base_job_name( self ) -> str:
return f'''{self.framework}-transfromers-test'''
@property
def test_path( self ) -> str:
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
def image_uri( self ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def sm_env(request ):
request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
| 719 |
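# Hedged check of how SageMaker consumes the metric_definitions above: each
# Regex is applied to training-log lines and the first capture group becomes
# the metric value (the log line below is an assumed format):
import re

line = "eval_accuracy = 0.8412"
match = re.search(r"eval_accuracy.*=\D*(.*?)$", line)
assert match is not None and float(match.group(1)) == 0.8412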
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self , features=None , device=None , **jnp_array_kwargs ):
super().__init__(features=features )
import jax
from jaxlib.xla_client import Device
if isinstance(device , Device ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` '''
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
DEVICE_MAPPING = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
self.device = str(jax.devices()[0] )
self.jnp_array_kwargs = jnp_array_kwargs
@staticmethod
def _map_devices_to_str( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(A ): device for device in jax.devices()}
def _consolidate( self , column ):
import jax
import jax.numpy as jnp
if isinstance(column , list ) and column:
if all(
isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(column , axis=0 )
return column
def _tensorize( self , value ):
import jax
import jax.numpy as jnp
if isinstance(value , (str, bytes, type(None)) ):
return value
elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
default_dtype = {}
if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
default_dtype = {"""dtype""": jnp.int64}
else:
default_dtype = {"""dtype""": jnp.int32}
elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
default_dtype = {"""dtype""": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value , PIL.Image.Image ):
value = np.asarray(value )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
DEVICE_MAPPING = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
def _recursive_tensorize( self , data_struct ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(data_struct , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(data_struct , """__array__""" ) and not isinstance(data_struct , jax.Array ):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
elif isinstance(data_struct , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(data_struct )
def recursive_tensorize( self , data_struct ):
return map_nested(self._recursive_tensorize , data_struct , map_list=False )
def format_row( self , pa_table ) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table )
row = self.python_features_decoder.decode_row(row )
return self.recursive_tensorize(row )
def format_column( self , pa_table ) -> "jax.Array":
column = self.numpy_arrow_extractor().extract_column(pa_table )
column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
column = self.recursive_tensorize(column )
column = self._consolidate(column )
return column
def format_batch( self , pa_table ) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table )
batch = self.python_features_decoder.decode_batch(batch )
batch = self.recursive_tensorize(batch )
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name] )
return batch
| 672 | 0 |
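# Hedged usage sketch of the formatter above through the public `datasets` API
# (requires `jax` to be installed; the toy data is illustrative):
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
row = ds[0]  # {"x": jnp array [1, 2]}; int dtype follows jax_enable_x64, as above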
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , embedding_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.embedding_size = embedding_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config( self ):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def _lowercase( self , A , A , A , A , A , A , A ) -> str:
UpperCAmelCase : Any = MegatronBertModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[str] = model(A , attention_mask=A , token_type_ids=A )
UpperCAmelCase : Union[str, Any] = model(A , token_type_ids=A )
UpperCAmelCase : Tuple = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A ) -> Dict:
UpperCAmelCase : Dict = MegatronBertForMaskedLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Optional[Any] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A , A ) -> int:
UpperCAmelCase : List[Any] = MegatronBertForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A , A ) -> Tuple:
UpperCAmelCase : Dict = MegatronBertForNextSentencePrediction(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowercase( self , A , A , A , A , A , A , A ) -> Tuple:
UpperCAmelCase : List[Any] = MegatronBertForPreTraining(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Union[str, Any] = model(
A , attention_mask=A , token_type_ids=A , labels=A , next_sentence_label=A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowercase( self , A , A , A , A , A , A , A ) -> List[str]:
UpperCAmelCase : List[str] = MegatronBertForQuestionAnswering(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Any = model(
A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase( self , A , A , A , A , A , A , A ) -> Tuple:
UpperCAmelCase : Any = self.num_labels
UpperCAmelCase : Dict = MegatronBertForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Optional[Any] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A , A , A , A , A ) -> Tuple:
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : Optional[Any] = MegatronBertForTokenClassification(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase( self , A , A , A , A , A , A , A ) -> str:
UpperCAmelCase : Dict = self.num_choices
UpperCAmelCase : str = MegatronBertForMultipleChoice(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Any = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase( self ) -> List[Any]:
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class MegatronBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = True
# test_resize_embeddings = False
lowercase = False
def _lowercase( self , A , A , A=False ) -> Dict:
UpperCAmelCase : Dict = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class in get_values(A ):
UpperCAmelCase : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A )
UpperCAmelCase : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Optional[Any] = MegatronBertModelTester(self )
UpperCAmelCase : Union[str, Any] = ConfigTester(self , config_class=A , hidden_size=37 )
def _lowercase( self ) -> List[str]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> Any:
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*A )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*A )
def _lowercase( self ) -> str:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*A )
def _lowercase( self ) -> Any:
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*A )
def _lowercase( self ) -> Dict:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*A )
def _lowercase( self ) -> Any:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*A )
def _long_tensor(tok_lst ):
return torch.tensor(
tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( unittest.TestCase ):
@slow
@unittest.skip("""Model is not available.""" )
def _lowercase( self ) -> Any:
UpperCAmelCase : Dict = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
UpperCAmelCase : str = os.path.join(os.environ["""MYDIR"""] , A )
UpperCAmelCase : Any = MegatronBertModel.from_pretrained(A )
model.to(A )
model.half()
UpperCAmelCase : Union[str, Any] = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
UpperCAmelCase : Any = model(A )[0]
UpperCAmelCase : List[str] = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , A )
UpperCAmelCase : int = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3 ):
for jj in range(3 ):
UpperCAmelCase : Optional[Any] = output[0, ii, jj]
UpperCAmelCase : Union[str, Any] = expected[3 * ii + jj]
UpperCAmelCase : Tuple = """ii={} jj={} a={} b={}""".format(A , A , A , A )
self.assertTrue(math.isclose(A , A , rel_tol=A , abs_tol=A ) , msg=A )
| 720 |
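# Reminder of the math.isclose semantics relied on above: the comparison passes
# when abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol), so TOLERANCE
# acts as both the relative and the absolute bound. Quick check:
import math

assert math.isclose(1.0001, 1.0002, rel_tol=1e-4, abs_tol=1e-4)
assert not math.isclose(1.0, 1.01, rel_tol=1e-4, abs_tol=1e-4)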
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def main():
g = Github(os.environ["""GITHUB_TOKEN"""] )
repo = g.get_repo("""huggingface/transformers""" )
open_issues = repo.get_issues(state="""open""" )
for issue in open_issues:
comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 672 | 0 |
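# Hedged distillation of the "stale" branch above into a pure predicate, which
# makes the date arithmetic easy to unit-test (the timestamps are illustrative):
from datetime import datetime, timedelta


def needs_stale_comment(updated_at: datetime, created_at: datetime, now: datetime) -> bool:
    return (now - updated_at).days > 23 and (now - created_at).days >= 30


_now = datetime(2023, 6, 1)
assert needs_stale_comment(_now - timedelta(days=24), _now - timedelta(days=40), _now)
assert not needs_stale_comment(_now - timedelta(days=5), _now - timedelta(days=40), _now)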
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig ):
model_type = """esm"""
def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1026 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ):
super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.emb_layer_norm_before = emb_layer_norm_before
self.token_dropout = token_dropout
self.is_folding_model = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
esmfold_config = EsmFoldConfig()
elif isinstance(esmfold_config , dict ):
esmfold_config = EsmFoldConfig(**esmfold_config )
self.esmfold_config = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
self.vocab_list = get_default_vocab_list()
else:
self.vocab_list = vocab_list
else:
self.esmfold_config = None
self.vocab_list = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , False ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def to_dict( self ):
output = super().to_dict()
if isinstance(self.esmfold_config , EsmFoldConfig ):
output["""esmfold_config"""] = self.esmfold_config.to_dict()
return output
@dataclass
class EsmFoldConfig:
esm_type: str = None
fp16_esm: bool = True
use_esm_attn_map: bool = False
esm_ablate_pairwise: bool = False
esm_ablate_sequence: bool = False
esm_input_dropout: float = 0
embed_aa: bool = True
bypass_lm: bool = False
lddt_head_hid_dim: int = 128
trunk: "TrunkConfig" = None
def __post_init__(self ):
if self.trunk is None:
self.trunk = TrunkConfig()
elif isinstance(self.trunk , dict ):
self.trunk = TrunkConfig(**self.trunk )
def to_dict(self ):
output = asdict(self )
output["""trunk"""] = self.trunk.to_dict()
return output
@dataclass
class TrunkConfig:
num_blocks: int = 48
sequence_state_dim: int = 1_024
pairwise_state_dim: int = 128
sequence_head_width: int = 32
pairwise_head_width: int = 32
position_bins: int = 32
dropout: float = 0
layer_drop: float = 0
cpu_grad_checkpoint: bool = False
max_recycles: int = 4
chunk_size: Optional[int] = 128
structure_module: "StructureModuleConfig" = None
def __post_init__(self ):
if self.structure_module is None:
self.structure_module = StructureModuleConfig()
elif isinstance(self.structure_module , dict ):
self.structure_module = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"""`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
f''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"""`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
f''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def to_dict(self ):
output = asdict(self )
output["""structure_module"""] = self.structure_module.to_dict()
return output
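# Worked example of the head-width constraint validated in __post_init__ above:
# with the defaults sequence_state_dim=1024 and sequence_head_width=32 there are
# 1024 // 32 = 32 heads and 32 * 32 == 1024, so TrunkConfig() validates, while
# e.g. sequence_state_dim=1000 would fail because 1000 % 32 != 0.
assert 1024 % 32 == 0 and (1024 // 32) * 32 == 1024
assert 1000 % 32 != 0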
@dataclass
class StructureModuleConfig:
sequence_dim: int = 384
pairwise_dim: int = 128
ipa_dim: int = 16
resnet_dim: int = 128
num_heads_ipa: int = 12
num_qk_points: int = 4
num_v_points: int = 8
dropout_rate: float = 0.1
num_blocks: int = 8
num_transition_layers: int = 1
num_resnet_blocks: int = 2
num_angles: int = 7
trans_scale_factor: int = 10
epsilon: float = 1e-8
inf: float = 1e5
def to_dict(self ):
return asdict(self )
def get_default_vocab_list() -> Tuple:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 721 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config( self ):
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def _lowercase( self , A , A , A , A , A , A , A ) -> str:
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A )
UpperCAmelCase : Optional[int] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> List[Any]:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
UpperCAmelCase : str = model(
A , attention_mask=A , encoder_hidden_states=A , )
UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Dict = True
UpperCAmelCase : Tuple = True
UpperCAmelCase : str = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
UpperCAmelCase : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
UpperCAmelCase : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )["""hidden_states"""][0]
UpperCAmelCase : Optional[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def _lowercase( self ) -> Union[str, Any]:
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class OpenLlamaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
lowercase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = OpenLlamaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : int = type
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[str] = 3
UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""]
UpperCAmelCase : str = input_ids.ne(1 ).to(A )
UpperCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = 3
UpperCAmelCase : Any = """single_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : Optional[Any] = input_ids.ne(1 ).to(A )
UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> int:
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = 3
UpperCAmelCase : Optional[Any] = """multi_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : int = input_ids.ne(1 ).to(A )
UpperCAmelCase : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase : Any = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _lowercase( self ) -> Dict:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowercase( self , A ) -> str:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Any = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
UpperCAmelCase : List[str] = original_model(A ).last_hidden_state
UpperCAmelCase : List[Any] = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Union[str, Any] = {"""type""": scaling_type, """factor""": 1_0.0}
UpperCAmelCase : str = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
UpperCAmelCase : List[str] = scaled_model(A ).last_hidden_state
UpperCAmelCase : Optional[int] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
| 672 | 0 |
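# Hedged note on the rope_scaling payload exercised above: it is a plain dict
# with "type" drawn from {"linear", "dynamic"} and a float "factor" > 1.0, e.g.:
rope_scaling_example = {"type": "linear", "factor": 10.0}
assert rope_scaling_example["type"] in ("linear", "dynamic") and rope_scaling_example["factor"] > 1.0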
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase ):
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_flip_channel_order=True , ):
size = size if size is not None else {"""shortest_edge""": 20}
crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_flip_channel_order = do_flip_channel_order
def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
image_processing_class = MobileViTImageProcessor if is_vision_available() else None
def setUp( self ):
self.image_processor_tester = MobileViTImageProcessingTester(self )
@property
def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , """do_resize""" ) )
self.assertTrue(hasattr(A , """size""" ) )
self.assertTrue(hasattr(A , """do_center_crop""" ) )
self.assertTrue(hasattr(A , """center_crop""" ) )
self.assertTrue(hasattr(A , """do_flip_channel_order""" ) )
def _lowercase( self ) -> int:
UpperCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _lowercase( self ) -> Optional[Any]:
pass
def _lowercase( self ) -> str:
# Initialize image_processing
UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
UpperCAmelCase : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCAmelCase : int = image_processing(A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _lowercase( self ) -> int:
# Initialize image_processing
UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
UpperCAmelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCAmelCase : List[str] = image_processing(A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _lowercase( self ) -> List[Any]:
# Initialize image_processing
UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
UpperCAmelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCAmelCase : List[str] = image_processing(A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
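
# A minimal usage sketch of the processor exercised above (illustrative only;
# assumes a local RGB image file and the standard `apple/mobilevit-small`
# checkpoint name):
#
#     from PIL import Image
#     from transformers import MobileViTImageProcessor
#
#     processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
#     pixel_values = processor(Image.open("cat.png"), return_tensors="pt").pixel_values
#     # MobileViT checkpoints were trained on BGR inputs, which is what
#     # `do_flip_channel_order=True` accounts for.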
"""Prime-number helpers: a trial-division primality test and a next-prime search."""

import math


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    # It is enough to test odd divisors up to sqrt(number)
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
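
# Illustrative usage of the two helpers defined above:
if __name__ == "__main__":
    assert is_prime(13) and not is_prime(14)
    # `factor` first multiplies the starting value, then the search walks upward
    # (or downward with desc=True) to the nearest prime.
    print(next_prime(14))           # 14 -> 15 -> 16 -> 17
    print(next_prime(7, factor=2))  # starts at 14, also yields 17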
"""Convert an image to its negative by inverting every pixel value."""

from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
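
# Note: the per-pixel loop above costs O(H*W) Python iterations. Since OpenCV
# images are NumPy uint8 arrays, the same inversion can be done in one
# vectorized step -- a sketch, assuming a standard 3-channel image:
#
#     negative = 255 - img          # or equivalently: cv2.bitwise_not(img)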
"""Three ways to check whether a string is a pangram (uses every letter a-z)."""


def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark the three implementations with timeit."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
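
# Quick sanity check of the three variants (illustrative):
#
#     >>> is_pangram("The quick brown fox jumps over the lazy dog")
#     True
#     >>> is_pangram_faster("Hello world")
#     False
#     >>> is_pangram_fastest("Waltz, bad nymph, for quick jigs vex")
#     True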